diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..772dc93 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,16 @@ +name: CI +on: + push: + branches: [main] + pull_request: + workflow_dispatch: + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + - uses: pre-commit/action@v3.0.1 + with: + extra_args: --all-files diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..ee40a96 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,16 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-json + - id: check-merge-conflict + - id: check-added-large-files + + - repo: https://github.com/tekwizely/pre-commit-golang + rev: v1.0.0-rc.2 + hooks: + - id: go-mod-tidy + - id: go-fmt diff --git a/README.md b/README.md index cd3428b..93c1cb5 100644 --- a/README.md +++ b/README.md @@ -1 +1,50 @@ -# codspeed-go \ No newline at end of file +
+# codspeed-go
+ +[![CI](https://github.com/CodSpeedHQ/codspeed-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/CodSpeedHQ/codspeed-go/actions/workflows/ci.yml) +[![Discord](https://img.shields.io/badge/chat%20on-discord-7289da.svg)](https://discord.com/invite/MxpaCfKSqF) +[![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/CodSpeedHQ/codspeed-go) + +
+ +This repo contains the integration libraries for using CodSpeed in Go: + +- [`go-runner`](./go-runner/): Go benchmark builder and runner +- [`compat/testing`](./compat/testing/): Compatibility layer for the `testing` package + +## Usage + +Integrating CodSpeed into your Go codebase requires **no modifications**. You can continue using `go test` and the `testing` package as you normally would. When running your benchmarks in CI with CodSpeed, we build and run the benchmarks ourselves and report the results to CodSpeed. + +For information on how to set up the integration, see the [CodSpeed documentation](https://codspeed.io/docs/benchmarks/golang). If you need further help integrating CodSpeed into your project, please feel free to open an issue or ask for help on our Discord server. + +## Manual Usage + +To run the benchmarks with CodSpeed locally, you need to install the `go-runner` crate, which is used to build and execute the benchmarks with instrumentation: +```bash +$ cd go-runner +$ cargo install --path . +``` + +Then you can run the benchmarks as shown below (the syntax is equivalent to `go test`, but fewer flags are supported). This prints all the benchmarks that can be run with CodSpeed, along with warnings for any benchmarks that are not supported. +```bash +$ cd example +$ export CODSPEED_PROFILE_FOLDER=/tmp/codspeed +$ go-runner test -bench=. +[INFO go_runner] Discovered 1 package +[INFO go_runner] Total benchmarks discovered: 2 +[INFO go_runner] Found BenchmarkFibonacci10 in "fib_test.go" +[INFO go_runner] Found BenchmarkFibonacci20_Loop in "fib_test.go" +[INFO go_runner] Generating custom runner for package: example +[INFO go_runner] Running benchmarks for package: example +Running with CodSpeed instrumentation +goos: linux +goarch: amd64 +cpu: 12th Gen Intel(R) Core(TM) i7-1260P @ 1672.130MHz +BenchmarkFibonacci10/fibonacci(10)/fibonacci(10)-16 1 1523 ns/op +BenchmarkFibonacci20_Loop-16 1 31373 ns/op +PASS +[INFO go_runner] Parsed 2 raw results +[INFO go_runner] Results written to "/tmp/codspeed/results/177951.json" +``` diff --git a/compat/testing/testing.go b/compat/testing/testing.go new file mode 100644 index 0000000..bfc87ce --- /dev/null +++ b/compat/testing/testing.go @@ -0,0 +1,70 @@ +//go:build codspeed +// +build codspeed + +package codspeed + +import ( + codspeed_testing "github.com/CodSpeedHQ/codspeed-go/testing/testing" +) + +type B = codspeed_testing.B +type BenchmarkResult = codspeed_testing.BenchmarkResult +type Cover = codspeed_testing.Cover +type CoverBlock = codspeed_testing.CoverBlock +type F = codspeed_testing.F +type InternalBenchmark = codspeed_testing.InternalBenchmark +type InternalExample = codspeed_testing.InternalExample +type InternalFuzzTarget = codspeed_testing.InternalFuzzTarget +type InternalTest = codspeed_testing.InternalTest +type M = codspeed_testing.M +type PB = codspeed_testing.PB +type T = codspeed_testing.T +type TB = codspeed_testing.TB + +func AllocsPerRun(runs int, f func()) (avg float64) { + return codspeed_testing.AllocsPerRun(runs, f) +} + +func CoverMode() string { + return codspeed_testing.CoverMode() +} + +func Coverage() float64 { + return codspeed_testing.Coverage() +} + +func Init() { + codspeed_testing.Init() +} + +func Main(matchString func(pat, str string) (bool, error), tests []codspeed_testing.InternalTest, benchmarks []codspeed_testing.InternalBenchmark, examples []codspeed_testing.InternalExample) { + codspeed_testing.Main(matchString, tests, benchmarks, examples) +} + +func RegisterCover(c codspeed_testing.Cover) { + 
codspeed_testing.RegisterCover(c) +} + +func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []codspeed_testing.InternalBenchmark) { + codspeed_testing.RunBenchmarks(matchString, benchmarks) +} + +func RunExamples(matchString func(pat, str string) (bool, error), examples []codspeed_testing.InternalExample) (ok bool) { + return codspeed_testing.RunExamples(matchString, examples) +} + +func RunTests(matchString func(pat, str string) (bool, error), tests []codspeed_testing.InternalTest) (ok bool) { + return codspeed_testing.RunTests(matchString, tests) +} + +func Short() bool { + return codspeed_testing.Short() +} + +func Testing() bool { + return codspeed_testing.Testing() +} + +func Verbose() bool { + return codspeed_testing.Verbose() +} diff --git a/compat/testing/testing_compat.go b/compat/testing/testing_compat.go new file mode 100644 index 0000000..1131edd --- /dev/null +++ b/compat/testing/testing_compat.go @@ -0,0 +1,70 @@ +//go:build !codspeed +// +build !codspeed + +package codspeed + +import ( + stdtesting "testing" +) + +type B = stdtesting.B +type BenchmarkResult = stdtesting.BenchmarkResult +type Cover = stdtesting.Cover +type CoverBlock = stdtesting.CoverBlock +type F = stdtesting.F +type InternalBenchmark = stdtesting.InternalBenchmark +type InternalExample = stdtesting.InternalExample +type InternalFuzzTarget = stdtesting.InternalFuzzTarget +type InternalTest = stdtesting.InternalTest +type M = stdtesting.M +type PB = stdtesting.PB +type T = stdtesting.T +type TB = stdtesting.TB + +func AllocsPerRun(runs int, f func()) (avg float64) { + return stdtesting.AllocsPerRun(runs, f) +} + +func CoverMode() string { + return stdtesting.CoverMode() +} + +func Coverage() float64 { + return stdtesting.Coverage() +} + +func Init() { + stdtesting.Init() +} + +func Main(matchString func(pat, str string) (bool, error), tests []stdtesting.InternalTest, benchmarks []stdtesting.InternalBenchmark, examples []stdtesting.InternalExample) { + stdtesting.Main(matchString, tests, benchmarks, examples) +} + +func RegisterCover(c stdtesting.Cover) { + stdtesting.RegisterCover(c) +} + +func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []stdtesting.InternalBenchmark) { + stdtesting.RunBenchmarks(matchString, benchmarks) +} + +func RunExamples(matchString func(pat, str string) (bool, error), examples []stdtesting.InternalExample) (ok bool) { + return stdtesting.RunExamples(matchString, examples) +} + +func RunTests(matchString func(pat, str string) (bool, error), tests []stdtesting.InternalTest) (ok bool) { + return stdtesting.RunTests(matchString, tests) +} + +func Short() bool { + return stdtesting.Short() +} + +func Testing() bool { + return stdtesting.Testing() +} + +func Verbose() bool { + return stdtesting.Verbose() +} diff --git a/example-codspeed/cli/runner.go b/example-codspeed/cli/runner.go new file mode 100644 index 0000000..3167509 --- /dev/null +++ b/example-codspeed/cli/runner.go @@ -0,0 +1,86 @@ +//go:build codspeed +// +build codspeed + +package main + +import ( + "fmt" + "io" + "reflect" + "time" + + example "example" + + codspeed_testing "github.com/CodSpeedHQ/codspeed-go/testing/testing" +) + +type corpusEntry = struct { + Parent string + Path string + Data []byte + Values []any + Generation int + IsSeed bool +} + +type simpleDeps struct{} + +func (d simpleDeps) ImportPath() string { return "" } +func (d simpleDeps) MatchString(pat, str string) (bool, error) { return true, nil } +func (d simpleDeps) SetPanicOnExit0(bool) {} +func (d 
simpleDeps) StartCPUProfile(io.Writer) error { return nil } +func (d simpleDeps) StopCPUProfile() {} +func (d simpleDeps) StartTestLog(io.Writer) {} +func (d simpleDeps) StopTestLog() error { return nil } +func (d simpleDeps) WriteProfileTo(string, io.Writer, int) error { return nil } + +func (d simpleDeps) CoordinateFuzzing( + fuzzTime time.Duration, + fuzzN int64, + minimizeTime time.Duration, + minimizeN int64, + parallel int, + corpus []corpusEntry, + types []reflect.Type, + corpusDir, + cacheDir string, +) error { + return nil +} +func (d simpleDeps) RunFuzzWorker(fn func(corpusEntry) error) error { + return nil +} +func (d simpleDeps) ReadCorpus(dir string, types []reflect.Type) ([]corpusEntry, error) { + return nil, nil +} +func (d simpleDeps) CheckCorpus(vals []any, types []reflect.Type) error { + return nil +} +func (d simpleDeps) ResetCoverage() {} +func (d simpleDeps) SnapshotCoverage() {} +func (d simpleDeps) InitRuntimeCoverage() (mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) { + return "", nil, nil +} + +func main() { + var tests = []codspeed_testing.InternalTest{} + var fuzzTargets = []codspeed_testing.InternalFuzzTarget{} + var examples = []codspeed_testing.InternalExample{} + var benchmarks = []codspeed_testing.InternalBenchmark{ + { + Name: "BenchmarkFibonacci10", + F: example.BenchmarkFibonacci10, + }, + { + Name: "BenchmarkFibonacci20", + F: example.BenchmarkFibonacci20, + }, + } + + for i := 0; i < len(benchmarks); i++ { + fmt.Printf("Benchmark %d: %s\n", i, benchmarks[i].Name) + } + + m := codspeed_testing.MainStart(simpleDeps{}, tests, benchmarks, fuzzTargets, examples) + m.Run() +} diff --git a/example-codspeed/fib.go b/example-codspeed/fib.go new file mode 100644 index 0000000..89f515d --- /dev/null +++ b/example-codspeed/fib.go @@ -0,0 +1,8 @@ +package example + +func fibonacci(n int) int { + if n <= 1 { + return n + } + return fibonacci(n-1) + fibonacci(n-2) +} diff --git a/example-codspeed/fib_codspeed.go b/example-codspeed/fib_codspeed.go new file mode 100644 index 0000000..680cdde --- /dev/null +++ b/example-codspeed/fib_codspeed.go @@ -0,0 +1,35 @@ +package example + +import ( + testing "github.com/CodSpeedHQ/codspeed-go/compat/testing" +) + +func BenchmarkFibonacci10(b *testing.B) { + // b.Run("fibonacci(40)", func(b *testing.B) { + // for i := 0; i < b.N; i++ { + // fibonacci(10) + // } + // }) + // b.Run("fibonacci(20)", func(b *testing.B) { + // for i := 0; i < b.N; i++ { + // fibonacci(20) + // } + // }) + b.RunParallel(func(b *testing.PB) { + for b.Next() { + fibonacci(30) + } + }) +} + +func BenchmarkFibonacci20(b *testing.B) { + for b.Loop() { + fibonacci(30) + } +} + +// func BenchmarkFibonacci30(b *testing.B) { +// for i := 0; i < b.N; i++ { +// fibonacci(30) +// } +// } diff --git a/example-codspeed/go.mod b/example-codspeed/go.mod new file mode 100644 index 0000000..5d4d6cd --- /dev/null +++ b/example-codspeed/go.mod @@ -0,0 +1,7 @@ +module example + +go 1.24.3 + +require github.com/CodSpeedHQ/codspeed-go v0.0.0 + +replace github.com/CodSpeedHQ/codspeed-go => .. 
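For context on how the pieces above fit together: a benchmark file imports the `compat/testing` shim under the `testing` alias, a plain build resolves it to the standard library `testing` package (the `!codspeed` variant), and a build with the `codspeed` tag swaps in the CodSpeed testing fork. A minimal sketch of such a file, reusing the `fibonacci` helper from `fib.go` above (this mirrors the `fib_codspeed.go` pattern rather than adding anything new):

```go
package example

import (
	// Resolves to the standard library "testing" package by default,
	// and to the CodSpeed testing fork when built with `-tags codspeed`.
	testing "github.com/CodSpeedHQ/codspeed-go/compat/testing"
)

// The benchmark body is ordinary Go code: the shim re-exports *testing.B,
// so the same source compiles against either backend.
func BenchmarkFibonacci10(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fibonacci(10)
	}
}
```

Since there is no `go test` harness under the `codspeed` tag, the custom `runner.go` above registers such functions as `InternalBenchmark` values and hands them to `codspeed_testing.MainStart`.
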
diff --git a/example/fib.go b/example/fib.go new file mode 100644 index 0000000..89f515d --- /dev/null +++ b/example/fib.go @@ -0,0 +1,8 @@ +package example + +func fibonacci(n int) int { + if n <= 1 { + return n + } + return fibonacci(n-1) + fibonacci(n-2) +} diff --git a/example/fib_test.go b/example/fib_test.go new file mode 100644 index 0000000..09dbad8 --- /dev/null +++ b/example/fib_test.go @@ -0,0 +1,32 @@ +package example + +import "testing" + +func BenchmarkFibonacci10(b *testing.B) { + b.Run("fibonacci(10)", func(b *testing.B) { + b.Run("fibonacci(10)", func(b *testing.B) { + for i := 0; i < b.N; i++ { + fibonacci(10) + } + }) + + }) +} + +func BenchmarkFibonacci20_Loop(b *testing.B) { + for b.Loop() { + fibonacci(20) + } +} + +func BenchmarkFibonacci20_bN(b *testing.B) { + for i := 0; i < b.N; i++ { + fibonacci(20) + } +} + +// func BenchmarkFibonacci30(b *testing.B) { +// b.Run("fibonacci(30)", func(b *testing.B) { +// this shouldn't be executed +// }) +// } diff --git a/example/go.mod b/example/go.mod new file mode 100644 index 0000000..bbfe4ce --- /dev/null +++ b/example/go.mod @@ -0,0 +1,3 @@ +module example + +go 1.24.3 diff --git a/example/sleep_test.go b/example/sleep_test.go new file mode 100644 index 0000000..85af0ee --- /dev/null +++ b/example/sleep_test.go @@ -0,0 +1,55 @@ +package example + +import ( + "testing" + "time" +) + +func busyWait(duration time.Duration) { + start := time.Now() + for time.Since(start) < duration { + // Busy wait loop + } +} + +func BenchmarkSleep100ns(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(100 * time.Nanosecond) + } +} + +func BenchmarkSleep1us(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(1 * time.Microsecond) + } +} + +func BenchmarkSleep10us(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(10 * time.Microsecond) + } +} + +func BenchmarkSleep100us(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(100 * time.Microsecond) + } +} + +func BenchmarkSleep1ms(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(1 * time.Millisecond) + } +} + +func BenchmarkSleep10ms(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(10 * time.Millisecond) + } +} + +func BenchmarkSleep50ms(b *testing.B) { + for i := 0; i < b.N; i++ { + busyWait(50 * time.Millisecond) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..124f58c --- /dev/null +++ b/go.mod @@ -0,0 +1,3 @@ +module github.com/CodSpeedHQ/codspeed-go + +go 1.24.3 diff --git a/testing/fork.sh b/testing/fork.sh new file mode 100755 index 0000000..59f2c75 --- /dev/null +++ b/testing/fork.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +git clone -b release-branch.go1.24 --depth 1 https://github.com/golang/go/ + +rm -rf internal testing + +# We need to copy the testing/ package: +cp -r go/src/testing testing/ + +mkdir -p internal/cpu +mkdir -p internal/fuzz +mkdir -p internal/goarch +mkdir -p internal/race +mkdir -p internal/sysinfo +mkdir -p internal/testlog +mkdir -p internal/goexperiment + +cp -r go/src/internal/cpu/* internal/cpu/ +cp -r go/src/internal/fuzz/* internal/fuzz/ +cp -r go/src/internal/goarch/* internal/goarch/ +cp -r go/src/internal/race/* internal/race/ +cp -r go/src/internal/sysinfo/* internal/sysinfo/ +cp -r go/src/internal/testlog/* internal/testlog/ +cp -r go/src/internal/goexperiment/* internal/goexperiment/ + +# Replace all `"internal/*"` imports with 'github.com/CodSpeedHQ/codspeed-go/testing/internal/' +find . 
-type f -name "*.go" -exec sed -i 's|"internal/|"github.com/CodSpeedHQ/codspeed-go/testing/internal/|g' {} + + +# Apply the race package patch to remove abi dependency +patch -p1 --forward --reject-file=- < internal_race.patch || echo "Patch may have already been applied or needs manual intervention" diff --git a/testing/internal/bisect/bisect.go b/testing/internal/bisect/bisect.go new file mode 100644 index 0000000..a79bb80 --- /dev/null +++ b/testing/internal/bisect/bisect.go @@ -0,0 +1,778 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bisect can be used by compilers and other programs +// to serve as a target for the bisect debugging tool. +// See [golang.org/x/tools/cmd/bisect] for details about using the tool. +// +// To be a bisect target, allowing bisect to help determine which of a set of independent +// changes provokes a failure, a program needs to: +// +// 1. Define a way to accept a change pattern on its command line or in its environment. +// The most common mechanism is a command-line flag. +// The pattern can be passed to [New] to create a [Matcher], the compiled form of a pattern. +// +// 2. Assign each change a unique ID. One possibility is to use a sequence number, +// but the most common mechanism is to hash some kind of identifying information +// like the file and line number where the change might be applied. +// [Hash] hashes its arguments to compute an ID. +// +// 3. Enable each change that the pattern says should be enabled. +// The [Matcher.ShouldEnable] method answers this question for a given change ID. +// +// 4. Print a report identifying each change that the pattern says should be printed. +// The [Matcher.ShouldPrint] method answers this question for a given change ID. +// The report consists of one more lines on standard error or standard output +// that contain a “match marker”. [Marker] returns the match marker for a given ID. +// When bisect reports a change as causing the failure, it identifies the change +// by printing the report lines with the match marker removed. +// +// # Example Usage +// +// A program starts by defining how it receives the pattern. In this example, we will assume a flag. +// The next step is to compile the pattern: +// +// m, err := bisect.New(patternFlag) +// if err != nil { +// log.Fatal(err) +// } +// +// Then, each time a potential change is considered, the program computes +// a change ID by hashing identifying information (source file and line, in this case) +// and then calls m.ShouldPrint and m.ShouldEnable to decide whether to +// print and enable the change, respectively. The two can return different values +// depending on whether bisect is trying to find a minimal set of changes to +// disable or to enable to provoke the failure. +// +// It is usually helpful to write a helper function that accepts the identifying information +// and then takes care of hashing, printing, and reporting whether the identified change +// should be enabled. 
For example, a helper for changes identified by a file and line number +// would be: +// +// func ShouldEnable(file string, line int) { +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// return m.ShouldEnable(h) +// } +// +// Finally, note that New returns a nil Matcher when there is no pattern, +// meaning that the target is not running under bisect at all, +// so all changes should be enabled and none should be printed. +// In that common case, the computation of the hash can be avoided entirely +// by checking for m == nil first: +// +// func ShouldEnable(file string, line int) bool { +// if m == nil { +// return true +// } +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// return m.ShouldEnable(h) +// } +// +// When the identifying information is expensive to format, this code can call +// [Matcher.MarkerOnly] to find out whether short report lines containing only the +// marker are permitted for a given run. (Bisect permits such lines when it is +// still exploring the space of possible changes and will not be showing the +// output to the user.) If so, the client can choose to print only the marker: +// +// func ShouldEnable(file string, line int) bool { +// if m == nil { +// return true +// } +// h := bisect.Hash(file, line) +// if m.ShouldPrint(h) { +// if m.MarkerOnly() { +// bisect.PrintMarker(os.Stderr, h) +// } else { +// fmt.Fprintf(os.Stderr, "%v %s:%d\n", bisect.Marker(h), file, line) +// } +// } +// return m.ShouldEnable(h) +// } +// +// This specific helper – deciding whether to enable a change identified by +// file and line number and printing about the change when necessary – is +// provided by the [Matcher.FileLine] method. +// +// Another common usage is deciding whether to make a change in a function +// based on the caller's stack, to identify the specific calling contexts that the +// change breaks. The [Matcher.Stack] method takes care of obtaining the stack, +// printing it when necessary, and reporting whether to enable the change +// based on that stack. +// +// # Pattern Syntax +// +// Patterns are generated by the bisect tool and interpreted by [New]. +// Users should not have to understand the patterns except when +// debugging a target's bisect support or debugging the bisect tool itself. +// +// The pattern syntax selecting a change is a sequence of bit strings +// separated by + and - operators. Each bit string denotes the set of +// changes with IDs ending in those bits, + is set addition, - is set subtraction, +// and the expression is evaluated in the usual left-to-right order. +// The special binary number “y” denotes the set of all changes, +// standing in for the empty bit string. +// In the expression, all the + operators must appear before all the - operators. +// A leading + adds to an empty set. A leading - subtracts from the set of all +// possible suffixes. +// +// For example: +// +// - “01+10” and “+01+10” both denote the set of changes +// with IDs ending with the bits 01 or 10. +// +// - “01+10-1001” denotes the set of changes with IDs +// ending with the bits 01 or 10, but excluding those ending in 1001. +// +// - “-01-1000” and “y-01-1000 both denote the set of all changes +// with IDs not ending in 01 nor 1000. +// +// - “0+1-01+001” is not a valid pattern, because all the + operators do not +// appear before all the - operators. 
+// +// In the syntaxes described so far, the pattern specifies the changes to +// enable and report. If a pattern is prefixed by a “!”, the meaning +// changes: the pattern specifies the changes to DISABLE and report. This +// mode of operation is needed when a program passes with all changes +// enabled but fails with no changes enabled. In this case, bisect +// searches for minimal sets of changes to disable. +// Put another way, the leading “!” inverts the result from [Matcher.ShouldEnable] +// but does not invert the result from [Matcher.ShouldPrint]. +// +// As a convenience for manual debugging, “n” is an alias for “!y”, +// meaning to disable and report all changes. +// +// Finally, a leading “v” in the pattern indicates that the reports will be shown +// to the user of bisect to describe the changes involved in a failure. +// At the API level, the leading “v” causes [Matcher.Visible] to return true. +// See the next section for details. +// +// # Match Reports +// +// The target program must enable only those changed matched +// by the pattern, and it must print a match report for each such change. +// A match report consists of one or more lines of text that will be +// printed by the bisect tool to describe a change implicated in causing +// a failure. Each line in the report for a given change must contain a +// match marker with that change ID, as returned by [Marker]. +// The markers are elided when displaying the lines to the user. +// +// A match marker has the form “[bisect-match 0x1234]” where +// 0x1234 is the change ID in hexadecimal. +// An alternate form is “[bisect-match 010101]”, giving the change ID in binary. +// +// When [Matcher.Visible] returns false, the match reports are only +// being processed by bisect to learn the set of enabled changes, +// not shown to the user, meaning that each report can be a match +// marker on a line by itself, eliding the usual textual description. +// When the textual description is expensive to compute, +// checking [Matcher.Visible] can help the avoid that expense +// in most runs. +package bisect + +import ( + "runtime" + "sync" + "sync/atomic" +) + +// New creates and returns a new Matcher implementing the given pattern. +// The pattern syntax is defined in the package doc comment. +// +// In addition to the pattern syntax syntax, New("") returns nil, nil. +// The nil *Matcher is valid for use: it returns true from ShouldEnable +// and false from ShouldPrint for all changes. Callers can avoid calling +// [Hash], [Matcher.ShouldEnable], and [Matcher.ShouldPrint] entirely +// when they recognize the nil Matcher. +func New(pattern string) (*Matcher, error) { + if pattern == "" { + return nil, nil + } + + m := new(Matcher) + + p := pattern + // Special case for leading 'q' so that 'qn' quietly disables, e.g. fmahash=qn to disable fma + // Any instance of 'v' disables 'q'. + if len(p) > 0 && p[0] == 'q' { + m.quiet = true + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + // Allow multiple v, so that “bisect cmd vPATTERN” can force verbose all the time. + for len(p) > 0 && p[0] == 'v' { + m.verbose = true + m.quiet = false + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + + // Allow multiple !, each negating the last, so that “bisect cmd !PATTERN” works + // even when bisect chooses to add its own !. + m.enable = true + for len(p) > 0 && p[0] == '!' 
{ + m.enable = !m.enable + p = p[1:] + if p == "" { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + } + + if p == "n" { + // n is an alias for !y. + m.enable = !m.enable + p = "y" + } + + // Parse actual pattern syntax. + result := true + bits := uint64(0) + start := 0 + wid := 1 // 1-bit (binary); sometimes 4-bit (hex) + for i := 0; i <= len(p); i++ { + // Imagine a trailing - at the end of the pattern to flush final suffix + c := byte('-') + if i < len(p) { + c = p[i] + } + if i == start && wid == 1 && c == 'x' { // leading x for hex + start = i + 1 + wid = 4 + continue + } + switch c { + default: + return nil, &parseError{"invalid pattern syntax: " + pattern} + case '2', '3', '4', '5', '6', '7', '8', '9': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + fallthrough + case '0', '1': + bits <<= wid + bits |= uint64(c - '0') + case 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', 'E', 'F': + if wid != 4 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + bits <<= 4 + bits |= uint64(c&^0x20 - 'A' + 10) + case 'y': + if i+1 < len(p) && (p[i+1] == '0' || p[i+1] == '1') { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + bits = 0 + case '+', '-': + if c == '+' && result == false { + // Have already seen a -. Should be - from here on. + return nil, &parseError{"invalid pattern syntax (+ after -): " + pattern} + } + if i > 0 { + n := (i - start) * wid + if n > 64 { + return nil, &parseError{"pattern bits too long: " + pattern} + } + if n <= 0 { + return nil, &parseError{"invalid pattern syntax: " + pattern} + } + if p[start] == 'y' { + n = 0 + } + mask := uint64(1)<= 0; i-- { + c := &m.list[i] + if id&c.mask == c.bits { + return c.result + } + } + return false +} + +// FileLine reports whether the change identified by file and line should be enabled. +// If the change should be printed, FileLine prints a one-line report to w. +func (m *Matcher) FileLine(w Writer, file string, line int) bool { + if m == nil { + return true + } + return m.fileLine(w, file, line) +} + +// fileLine does the real work for FileLine. +// This lets FileLine's body handle m == nil and potentially be inlined. +func (m *Matcher) fileLine(w Writer, file string, line int) bool { + h := Hash(file, line) + if m.ShouldPrint(h) { + if m.MarkerOnly() { + PrintMarker(w, h) + } else { + printFileLine(w, h, file, line) + } + } + return m.ShouldEnable(h) +} + +// printFileLine prints a non-marker-only report for file:line to w. +func printFileLine(w Writer, h uint64, file string, line int) error { + const markerLen = 40 // overestimate + b := make([]byte, 0, markerLen+len(file)+24) + b = AppendMarker(b, h) + b = appendFileLine(b, file, line) + b = append(b, '\n') + _, err := w.Write(b) + return err +} + +// appendFileLine appends file:line to dst, returning the extended slice. +func appendFileLine(dst []byte, file string, line int) []byte { + dst = append(dst, file...) + dst = append(dst, ':') + u := uint(line) + if line < 0 { + dst = append(dst, '-') + u = -u + } + var buf [24]byte + i := len(buf) + for i == len(buf) || u > 0 { + i-- + buf[i] = '0' + byte(u%10) + u /= 10 + } + dst = append(dst, buf[i:]...) + return dst +} + +// MatchStack assigns the current call stack a change ID. +// If the stack should be printed, MatchStack prints it. +// Then MatchStack reports whether a change at the current call stack should be enabled. 
+func (m *Matcher) Stack(w Writer) bool { + if m == nil { + return true + } + return m.stack(w) +} + +// stack does the real work for Stack. +// This lets stack's body handle m == nil and potentially be inlined. +func (m *Matcher) stack(w Writer) bool { + const maxStack = 16 + var stk [maxStack]uintptr + n := runtime.Callers(2, stk[:]) + // caller #2 is not for printing; need it to normalize PCs if ASLR. + if n <= 1 { + return false + } + + base := stk[0] + // normalize PCs + for i := range stk[:n] { + stk[i] -= base + } + + h := Hash(stk[:n]) + if m.ShouldPrint(h) { + var d *dedup + for { + d = m.dedup.Load() + if d != nil { + break + } + d = new(dedup) + if m.dedup.CompareAndSwap(nil, d) { + break + } + } + + if m.MarkerOnly() { + if !d.seenLossy(h) { + PrintMarker(w, h) + } + } else { + if !d.seen(h) { + // Restore PCs in stack for printing + for i := range stk[:n] { + stk[i] += base + } + printStack(w, h, stk[1:n]) + } + } + } + return m.ShouldEnable(h) +} + +// Writer is the same interface as io.Writer. +// It is duplicated here to avoid importing io. +type Writer interface { + Write([]byte) (int, error) +} + +// PrintMarker prints to w a one-line report containing only the marker for h. +// It is appropriate to use when [Matcher.ShouldPrint] and [Matcher.MarkerOnly] both return true. +func PrintMarker(w Writer, h uint64) error { + var buf [50]byte + b := AppendMarker(buf[:0], h) + b = append(b, '\n') + _, err := w.Write(b) + return err +} + +// printStack prints to w a multi-line report containing a formatting of the call stack stk, +// with each line preceded by the marker for h. +func printStack(w Writer, h uint64, stk []uintptr) error { + buf := make([]byte, 0, 2048) + + var prefixBuf [100]byte + prefix := AppendMarker(prefixBuf[:0], h) + + frames := runtime.CallersFrames(stk) + for { + f, more := frames.Next() + buf = append(buf, prefix...) + buf = append(buf, f.Function...) + buf = append(buf, "()\n"...) + buf = append(buf, prefix...) + buf = append(buf, '\t') + buf = appendFileLine(buf, f.File, f.Line) + buf = append(buf, '\n') + if !more { + break + } + } + buf = append(buf, prefix...) + buf = append(buf, '\n') + _, err := w.Write(buf) + return err +} + +// Marker returns the match marker text to use on any line reporting details +// about a match of the given ID. +// It always returns the hexadecimal format. +func Marker(id uint64) string { + return string(AppendMarker(nil, id)) +} + +// AppendMarker is like [Marker] but appends the marker to dst. +func AppendMarker(dst []byte, id uint64) []byte { + const prefix = "[bisect-match 0x" + var buf [len(prefix) + 16 + 1]byte + copy(buf[:], prefix) + for i := 0; i < 16; i++ { + buf[len(prefix)+i] = "0123456789abcdef"[id>>60] + id <<= 4 + } + buf[len(prefix)+16] = ']' + return append(dst, buf[:]...) +} + +// CutMarker finds the first match marker in line and removes it, +// returning the shortened line (with the marker removed), +// the ID from the match marker, +// and whether a marker was found at all. +// If there is no marker, CutMarker returns line, 0, false. +func CutMarker(line string) (short string, id uint64, ok bool) { + // Find first instance of prefix. + prefix := "[bisect-match " + i := 0 + for ; ; i++ { + if i >= len(line)-len(prefix) { + return line, 0, false + } + if line[i] == '[' && line[i:i+len(prefix)] == prefix { + break + } + } + + // Scan to ]. + j := i + len(prefix) + for j < len(line) && line[j] != ']' { + j++ + } + if j >= len(line) { + return line, 0, false + } + + // Parse id. 
+ idstr := line[i+len(prefix) : j] + if len(idstr) >= 3 && idstr[:2] == "0x" { + // parse hex + if len(idstr) > 2+16 { // max 0x + 16 digits + return line, 0, false + } + for i := 2; i < len(idstr); i++ { + id <<= 4 + switch c := idstr[i]; { + case '0' <= c && c <= '9': + id |= uint64(c - '0') + case 'a' <= c && c <= 'f': + id |= uint64(c - 'a' + 10) + case 'A' <= c && c <= 'F': + id |= uint64(c - 'A' + 10) + } + } + } else { + if idstr == "" || len(idstr) > 64 { // min 1 digit, max 64 digits + return line, 0, false + } + // parse binary + for i := 0; i < len(idstr); i++ { + id <<= 1 + switch c := idstr[i]; c { + default: + return line, 0, false + case '0', '1': + id |= uint64(c - '0') + } + } + } + + // Construct shortened line. + // Remove at most one space from around the marker, + // so that "foo [marker] bar" shortens to "foo bar". + j++ // skip ] + if i > 0 && line[i-1] == ' ' { + i-- + } else if j < len(line) && line[j] == ' ' { + j++ + } + short = line[:i] + line[j:] + return short, id, true +} + +// Hash computes a hash of the data arguments, +// each of which must be of type string, byte, int, uint, int32, uint32, int64, uint64, uintptr, or a slice of one of those types. +func Hash(data ...any) uint64 { + h := offset64 + for _, v := range data { + switch v := v.(type) { + default: + // Note: Not printing the type, because reflect.ValueOf(v) + // would make the interfaces prepared by the caller escape + // and therefore allocate. This way, Hash(file, line) runs + // without any allocation. It should be clear from the + // source code calling Hash what the bad argument was. + panic("bisect.Hash: unexpected argument type") + case string: + h = fnvString(h, v) + case byte: + h = fnv(h, v) + case int: + h = fnvUint64(h, uint64(v)) + case uint: + h = fnvUint64(h, uint64(v)) + case int32: + h = fnvUint32(h, uint32(v)) + case uint32: + h = fnvUint32(h, v) + case int64: + h = fnvUint64(h, uint64(v)) + case uint64: + h = fnvUint64(h, v) + case uintptr: + h = fnvUint64(h, uint64(v)) + case []string: + for _, x := range v { + h = fnvString(h, x) + } + case []byte: + for _, x := range v { + h = fnv(h, x) + } + case []int: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []uint: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []int32: + for _, x := range v { + h = fnvUint32(h, uint32(x)) + } + case []uint32: + for _, x := range v { + h = fnvUint32(h, x) + } + case []int64: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + case []uint64: + for _, x := range v { + h = fnvUint64(h, x) + } + case []uintptr: + for _, x := range v { + h = fnvUint64(h, uint64(x)) + } + } + } + return h +} + +// Trivial error implementation, here to avoid importing errors. + +// parseError is a trivial error implementation, +// defined here to avoid importing errors. +type parseError struct{ text string } + +func (e *parseError) Error() string { return e.text } + +// FNV-1a implementation. See Go's hash/fnv/fnv.go. +// Copied here for simplicity (can handle integers more directly) +// and to avoid importing hash/fnv. 
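+// For reference, one FNV-1a step folds a byte b into the hash h as +// h = (h XOR b) * prime64, starting from h = offset64; the helpers below +// apply this step byte-by-byte to strings and to fixed-width integers.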
+ +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +func fnv(h uint64, x byte) uint64 { + h ^= uint64(x) + h *= prime64 + return h +} + +func fnvString(h uint64, x string) uint64 { + for i := 0; i < len(x); i++ { + h ^= uint64(x[i]) + h *= prime64 + } + return h +} + +func fnvUint64(h uint64, x uint64) uint64 { + for i := 0; i < 8; i++ { + h ^= x & 0xFF + x >>= 8 + h *= prime64 + } + return h +} + +func fnvUint32(h uint64, x uint32) uint64 { + for i := 0; i < 4; i++ { + h ^= uint64(x & 0xFF) + x >>= 8 + h *= prime64 + } + return h +} + +// A dedup is a deduplicator for call stacks, so that we only print +// a report for new call stacks, not for call stacks we've already +// reported. +// +// It has two modes: an approximate but lock-free mode that +// may still emit some duplicates, and a precise mode that uses +// a lock and never emits duplicates. +type dedup struct { + // 128-entry 4-way, lossy cache for seenLossy + recent [128][4]uint64 + + // complete history for seen + mu sync.Mutex + m map[uint64]bool +} + +// seen records that h has now been seen and reports whether it was seen before. +// When seen returns false, the caller is expected to print a report for h. +func (d *dedup) seen(h uint64) bool { + d.mu.Lock() + if d.m == nil { + d.m = make(map[uint64]bool) + } + seen := d.m[h] + d.m[h] = true + d.mu.Unlock() + return seen +} + +// seenLossy is a variant of seen that avoids a lock by using a cache of recently seen hashes. +// Each cache entry is N-way set-associative: h can appear in any of the slots. +// If h does not appear in any of them, then it is inserted into a random slot, +// overwriting whatever was there before. +func (d *dedup) seenLossy(h uint64) bool { + cache := &d.recent[uint(h)%uint(len(d.recent))] + for i := 0; i < len(cache); i++ { + if atomic.LoadUint64(&cache[i]) == h { + return true + } + } + + // Compute index in set to evict as hash of current set. + ch := offset64 + for _, x := range cache { + ch = fnvUint64(ch, x) + } + atomic.StoreUint64(&cache[uint(ch)%uint(len(cache))], h) + return false +} diff --git a/testing/internal/cfg/cfg.go b/testing/internal/cfg/cfg.go new file mode 100644 index 0000000..9329769 --- /dev/null +++ b/testing/internal/cfg/cfg.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cfg holds configuration shared by the Go command and internal/testenv. +// Definitions that don't need to be exposed outside of cmd/go should be in +// cmd/go/internal/cfg instead of this package. +package cfg + +// KnownEnv is a list of environment variables that affect the operation +// of the Go command. 
+const KnownEnv = ` + AR + CC + CGO_CFLAGS + CGO_CFLAGS_ALLOW + CGO_CFLAGS_DISALLOW + CGO_CPPFLAGS + CGO_CPPFLAGS_ALLOW + CGO_CPPFLAGS_DISALLOW + CGO_CXXFLAGS + CGO_CXXFLAGS_ALLOW + CGO_CXXFLAGS_DISALLOW + CGO_ENABLED + CGO_FFLAGS + CGO_FFLAGS_ALLOW + CGO_FFLAGS_DISALLOW + CGO_LDFLAGS + CGO_LDFLAGS_ALLOW + CGO_LDFLAGS_DISALLOW + CXX + FC + GCCGO + GO111MODULE + GO386 + GOAMD64 + GOARCH + GOARM + GOARM64 + GOAUTH + GOBIN + GOCACHE + GOCACHEPROG + GOENV + GOEXE + GOEXPERIMENT + GOFIPS140 + GOFLAGS + GOGCCFLAGS + GOHOSTARCH + GOHOSTOS + GOINSECURE + GOMIPS + GOMIPS64 + GOMODCACHE + GONOPROXY + GONOSUMDB + GOOS + GOPATH + GOPPC64 + GOPRIVATE + GOPROXY + GORISCV64 + GOROOT + GOSUMDB + GOTMPDIR + GOTOOLCHAIN + GOTOOLDIR + GOVCS + GOWASM + GOWORK + GO_EXTLINK_ENABLED + PKG_CONFIG +` diff --git a/testing/internal/cpu/cpu.go b/testing/internal/cpu/cpu.go new file mode 100644 index 0000000..7b0bdfb --- /dev/null +++ b/testing/internal/cpu/cpu.go @@ -0,0 +1,252 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection +// used by the Go standard library. +package cpu + +import _ "unsafe" // for linkname + +// DebugOptions is set to true by the runtime if the OS supports reading +// GODEBUG early in runtime startup. +// This should not be changed after it is initialized. +var DebugOptions bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [CacheLinePadSize]byte } + +// CacheLineSize is the CPU's assumed cache line size. +// There is currently no runtime detection of the real cache line size +// so we use the constant per GOARCH CacheLinePadSize as an approximation. +var CacheLineSize uintptr = CacheLinePadSize + +// The booleans in X86 contain the correspondingly named cpuid feature bit. +// HasAVX and HasAVX2 are only set if the OS does support XMM and YMM registers +// in addition to the cpuid feature bit being set. +// The struct is padded to avoid false sharing. +var X86 struct { + _ CacheLinePad + HasAES bool + HasADX bool + HasAVX bool + HasAVX2 bool + HasAVX512F bool + HasAVX512BW bool + HasAVX512VL bool + HasBMI1 bool + HasBMI2 bool + HasERMS bool + HasFSRM bool + HasFMA bool + HasOSXSAVE bool + HasPCLMULQDQ bool + HasPOPCNT bool + HasRDTSCP bool + HasSHA bool + HasSSE3 bool + HasSSSE3 bool + HasSSE41 bool + HasSSE42 bool + _ CacheLinePad +} + +// The booleans in ARM contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var ARM struct { + _ CacheLinePad + HasVFPv4 bool + HasIDIVA bool + HasV7Atomics bool + _ CacheLinePad +} + +// The booleans in ARM64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var ARM64 struct { + _ CacheLinePad + HasAES bool + HasPMULL bool + HasSHA1 bool + HasSHA2 bool + HasSHA512 bool + HasCRC32 bool + HasATOMICS bool + HasCPUID bool + HasDIT bool + IsNeoverse bool + _ CacheLinePad +} + +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. 
+var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + _ CacheLinePad +} + +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + +// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (darn, scv), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + IsPOWER10 bool // ISA v3.1 (POWER10) + _ CacheLinePad +} + +var S390X struct { + _ CacheLinePad + HasZARCH bool // z architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended [mandatory] + HasLDISP bool // long (20-bit) displacements [mandatory] + HasEIMM bool // 32-bit immediates [mandatory] + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records. + HasVXE bool // vector-enhancements facility 1 + HasKDSA bool // elliptic curve functions + HasECDSA bool // NIST curves + HasEDDSA bool // Edwards curves + _ CacheLinePad +} + +// CPU feature variables are accessed by assembly code in various packages. +//go:linkname X86 +//go:linkname ARM +//go:linkname ARM64 +//go:linkname Loong64 +//go:linkname MIPS64X +//go:linkname PPC64 +//go:linkname S390X + +// Initialize examines the processor and sets the relevant variables above. +// This is called by the runtime package early in program initialization, +// before normal init functions are run. env is set by runtime if the OS supports +// cpu feature options in GODEBUG. +func Initialize(env string) { + doinit() + processOptions(env) +} + +// options contains the cpu debug options that can be used in GODEBUG. +// Options are arch dependent and are added by the arch specific doinit functions. +// Features that are mandatory for the specific GOARCH should not be added to options +// (e.g. SSE2 on amd64). +var options []option + +// Option names should be lower case. e.g. avx instead of AVX. +type option struct { + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled +} + +// processOptions enables or disables CPU feature values based on the parsed env string. +// The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2... 
+// where feature names is one of the architecture specific list stored in the +// cpu packages options variable and values are either 'on' or 'off'. +// If env contains cpu.all=off then all cpu features referenced through the options +// variable are disabled. Other feature names and values result in warning messages. +func processOptions(env string) { +field: + for env != "" { + field := "" + i := indexByte(env, ',') + if i < 0 { + field, env = env, "" + } else { + field, env = env[:i], env[i+1:] + } + if len(field) < 4 || field[:4] != "cpu." { + continue + } + i = indexByte(field, '=') + if i < 0 { + print("GODEBUG: no value specified for \"", field, "\"\n") + continue + } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field + } + } + + print("GODEBUG: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + *o.Feature = o.Enable + } +} + +// indexByte returns the index of the first instance of c in s, +// or -1 if c is not present in s. +// indexByte is semantically the same as [strings.IndexByte]. +// We copy this function because "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" should not have external dependencies. +func indexByte(s string, c byte) int { + for i := 0; i < len(s); i++ { + if s[i] == c { + return i + } + } + return -1 +} diff --git a/testing/internal/cpu/cpu.s b/testing/internal/cpu/cpu.s new file mode 100644 index 0000000..20166c8 --- /dev/null +++ b/testing/internal/cpu/cpu.s @@ -0,0 +1,6 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This assembly file exists to allow internal/cpu to call +// non-exported runtime functions that use "go:linkname". diff --git a/testing/internal/cpu/cpu_arm.go b/testing/internal/cpu/cpu_arm.go new file mode 100644 index 0000000..080e788 --- /dev/null +++ b/testing/internal/cpu/cpu_arm.go @@ -0,0 +1,48 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const CacheLinePadSize = 32 + +// arm doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2. +// These are initialized by archauxv() and should not be changed after they are +// initialized. +var HWCap uint +var HWCap2 uint +var Platform string + +// HWCAP/HWCAP2 bits. These are exposed by Linux and FreeBSD. 
+const ( + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 + hwcap_LPAE = 1 << 20 +) + +func doinit() { + options = []option{ + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + {Name: "v7atomics", Feature: &ARM.HasV7Atomics}, + } + + // HWCAP feature bits + ARM.HasVFPv4 = isSet(HWCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(HWCap, hwcap_IDIVA) + // lpae is required to make the 64-bit instructions LDRD and STRD (and variants) atomic. + // See ARMv7 manual section B1.6. + // We also need at least a v7 chip, for the DMB instruction. + ARM.HasV7Atomics = isSet(HWCap, hwcap_LPAE) && isV7(Platform) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +func isV7(s string) bool { + if s == "aarch64" { + return true + } + return s >= "v7" // will be something like v5, v7, v8, v8l +} diff --git a/testing/internal/cpu/cpu_arm64.go b/testing/internal/cpu/cpu_arm64.go new file mode 100644 index 0000000..1365991 --- /dev/null +++ b/testing/internal/cpu/cpu_arm64.go @@ -0,0 +1,76 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// CacheLinePadSize is used to prevent false sharing of cache lines. +// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size. +// It doesn't cost much and is much more future-proof. +const CacheLinePadSize = 128 + +func doinit() { + options = []option{ + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "pmull", Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "isNeoverse", Feature: &ARM64.IsNeoverse}, + } + + // arm64 uses different ways to detect CPU features at runtime depending on the operating system. + osInit() +} + +func getisar0() uint64 + +func getpfr0() uint64 + +func getMIDR() uint64 + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} + +func parseARM64SystemRegisters(isar0, pfr0 uint64) { + // ID_AA64ISAR0_EL1 + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } +} diff --git a/testing/internal/cpu/cpu_arm64.s b/testing/internal/cpu/cpu_arm64.s new file mode 100644 index 0000000..9607561 --- /dev/null +++ b/testing/internal/cpu/cpu_arm64.s @@ -0,0 +1,25 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0 + // get Instruction Set Attributes 0 into R0 + MRS ID_AA64ISAR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into R0 + MRS ID_AA64PFR0_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getMIDR() uint64 +TEXT ·getMIDR(SB), NOSPLIT, $0-8 + MRS MIDR_EL1, R0 + MOVD R0, ret+0(FP) + RET diff --git a/testing/internal/cpu/cpu_arm64_android.go b/testing/internal/cpu/cpu_arm64_android.go new file mode 100644 index 0000000..fbdf7ba --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_android.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 + +package cpu + +func osInit() { + hwcapInit("android") +} diff --git a/testing/internal/cpu/cpu_arm64_darwin.go b/testing/internal/cpu/cpu_arm64_darwin.go new file mode 100644 index 0000000..57cf631 --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_darwin.go @@ -0,0 +1,46 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && darwin && !ios + +package cpu + +import _ "unsafe" // for linkname + +func osInit() { + ARM64.HasATOMICS = sysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00")) + ARM64.HasCRC32 = sysctlEnabled([]byte("hw.optional.armv8_crc32\x00")) + ARM64.HasSHA512 = sysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00")) + ARM64.HasDIT = sysctlEnabled([]byte("hw.optional.arm.FEAT_DIT\x00")) + + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + ARM64.HasAES = true + ARM64.HasPMULL = true + ARM64.HasSHA1 = true + ARM64.HasSHA2 = true +} + +//go:noescape +func getsysctlbyname(name []byte) (int32, int32) + +// sysctlEnabled should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/gopkg +// - github.com/songzhibin97/gkit +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname sysctlEnabled +func sysctlEnabled(name []byte) bool { + ret, value := getsysctlbyname(name) + if ret < 0 { + return false + } + return value > 0 +} diff --git a/testing/internal/cpu/cpu_arm64_freebsd.go b/testing/internal/cpu/cpu_arm64_freebsd.go new file mode 100644 index 0000000..c339e6f --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_freebsd.go @@ -0,0 +1,15 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 + +package cpu + +func osInit() { + // Retrieve info from system register ID_AA64ISAR0_EL1. + isar0 := getisar0() + prf0 := getpfr0() + + parseARM64SystemRegisters(isar0, prf0) +} diff --git a/testing/internal/cpu/cpu_arm64_hwcap.go b/testing/internal/cpu/cpu_arm64_hwcap.go new file mode 100644 index 0000000..cdc1d89 --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_hwcap.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && linux + +package cpu + +import _ "unsafe" // for linkname + +// HWCap may be initialized by archauxv and +// should not be changed after it was initialized. +// +// Other widely used packages +// access HWCap using linkname as well, most notably: +// - github.com/klauspost/cpuid/v2 +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname HWCap +var HWCap uint + +// HWCAP bits. These are exposed by Linux. +const ( + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_CPUID = 1 << 11 + hwcap_SHA512 = 1 << 21 + hwcap_DIT = 1 << 24 +) + +func hwcapInit(os string) { + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. + ARM64.HasAES = isSet(HWCap, hwcap_AES) + ARM64.HasPMULL = isSet(HWCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(HWCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(HWCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(HWCap, hwcap_CRC32) + ARM64.HasCPUID = isSet(HWCap, hwcap_CPUID) + ARM64.HasSHA512 = isSet(HWCap, hwcap_SHA512) + ARM64.HasDIT = isSet(HWCap, hwcap_DIT) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + ARM64.HasATOMICS = isSet(HWCap, hwcap_ATOMICS) && os != "android" + + // Check to see if executing on a Neoverse core and in order to do that, + // check the AUXV for the CPUID bit. The getMIDR function executes an + // instruction which would normally be an illegal instruction, but it's + // trapped by the kernel, the value sanitized and then returned. + // Without the CPUID bit the kernel will not trap the instruction and the + // process will be terminated with SIGILL. + if ARM64.HasCPUID { + midr := getMIDR() + part_num := uint16((midr >> 4) & 0xfff) + implementer := byte((midr >> 24) & 0xff) + + // d0c - NeoverseN1 + // d40 - NeoverseV1 + // d49 - NeoverseN2 + // d4f - NeoverseV2 + if implementer == 'A' && (part_num == 0xd0c || part_num == 0xd40 || + part_num == 0xd49 || part_num == 0xd4f) { + ARM64.IsNeoverse = true + } + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/testing/internal/cpu/cpu_arm64_linux.go b/testing/internal/cpu/cpu_arm64_linux.go new file mode 100644 index 0000000..d746bdb --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_linux.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && linux && !android + +package cpu + +func osInit() { + hwcapInit("linux") +} diff --git a/testing/internal/cpu/cpu_arm64_openbsd.go b/testing/internal/cpu/cpu_arm64_openbsd.go new file mode 100644 index 0000000..6cc69c9 --- /dev/null +++ b/testing/internal/cpu/cpu_arm64_openbsd.go @@ -0,0 +1,35 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 + +package cpu + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h. 
+	_CPU_ID_AA64ISAR0 = 2
+	_CPU_ID_AA64ISAR1 = 3
+	_CPU_ID_AA64PFR0  = 8
+)
+
+//go:noescape
+func sysctlUint64(mib []uint32) (uint64, bool)
+
+func osInit() {
+	// Get ID_AA64ISAR0 from sysctl.
+	isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0})
+	if !ok {
+		return
+	}
+	// Get ID_AA64PFR0 from sysctl.
+	pfr0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64PFR0})
+	if !ok {
+		return
+	}
+
+	parseARM64SystemRegisters(isar0, pfr0)
+}
diff --git a/testing/internal/cpu/cpu_arm64_other.go b/testing/internal/cpu/cpu_arm64_other.go
new file mode 100644
index 0000000..44592cf
--- /dev/null
+++ b/testing/internal/cpu/cpu_arm64_other.go
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build arm64 && !linux && !freebsd && !android && (!darwin || ios) && !openbsd
+
+package cpu
+
+func osInit() {
+	// Other operating systems do not support reading HWCap from auxiliary vector,
+	// reading privileged aarch64 system registers or sysctl in user space to detect
+	// CPU features at runtime.
+}
diff --git a/testing/internal/cpu/cpu_loong64.go b/testing/internal/cpu/cpu_loong64.go
new file mode 100644
index 0000000..92583d0
--- /dev/null
+++ b/testing/internal/cpu/cpu_loong64.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build loong64
+
+package cpu
+
+// CacheLinePadSize is used to prevent false sharing of cache lines.
+// We choose 64 because on the Loongson 3A5000 the L1 Dcache is 4-way 256-line 64-byte-per-line.
+const CacheLinePadSize = 64
+
+// Bit fields for CPUCFG registers; related reference documents:
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
+const (
+	// CPUCFG1 bits
+	cpucfg1_CRC32 = 1 << 25
+
+	// CPUCFG2 bits
+	cpucfg2_LAM_BH = 1 << 27
+	cpucfg2_LAMCAS = 1 << 28
+)
+
+// get_cpucfg is implemented in cpu_loong64.s.
+func get_cpucfg(reg uint32) uint32
+
+func doinit() {
+	options = []option{
+		{Name: "lsx", Feature: &Loong64.HasLSX},
+		{Name: "crc32", Feature: &Loong64.HasCRC32},
+		{Name: "lamcas", Feature: &Loong64.HasLAMCAS},
+		{Name: "lam_bh", Feature: &Loong64.HasLAM_BH},
+	}
+
+	// The CPUCFG data on Loong64 only reflects the hardware capabilities,
+	// not the kernel support status, so features such as LSX and LASX that
+	// require kernel support cannot be obtained from the CPUCFG data.
+	//
+	// These features only require hardware capability support and do not
+	// require kernel specific support, so they can be obtained directly
+	// through CPUCFG.
+	cfg1 := get_cpucfg(1)
+	cfg2 := get_cpucfg(2)
+
+	Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32)
+	Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS)
+	Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH)
+
+	osInit()
+}
+
+func cfgIsSet(cfg uint32, val uint32) bool {
+	return cfg&val != 0
+}
diff --git a/testing/internal/cpu/cpu_loong64.s b/testing/internal/cpu/cpu_loong64.s
new file mode 100644
index 0000000..f02a278
--- /dev/null
+++ b/testing/internal/cpu/cpu_loong64.s
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0-12 + MOVW reg+0(FP), R5 + CPUCFG R5, R4 + MOVW R4, ret+8(FP) + RET diff --git a/testing/internal/cpu/cpu_loong64_hwcap.go b/testing/internal/cpu/cpu_loong64_hwcap.go new file mode 100644 index 0000000..58397ad --- /dev/null +++ b/testing/internal/cpu/cpu_loong64_hwcap.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux + +package cpu + +// This is initialized by archauxv and should not be changed after it is +// initialized. +var HWCap uint + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 +) + +func hwcapInit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. + Loong64.HasLSX = hwcIsSet(HWCap, hwcap_LOONGARCH_LSX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/testing/internal/cpu/cpu_loong64_linux.go b/testing/internal/cpu/cpu_loong64_linux.go new file mode 100644 index 0000000..73bc384 --- /dev/null +++ b/testing/internal/cpu/cpu_loong64_linux.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux + +package cpu + +func osInit() { + hwcapInit() +} diff --git a/testing/internal/cpu/cpu_mips.go b/testing/internal/cpu/cpu_mips.go new file mode 100644 index 0000000..14a9c97 --- /dev/null +++ b/testing/internal/cpu/cpu_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const CacheLinePadSize = 32 + +func doinit() { +} diff --git a/testing/internal/cpu/cpu_mips64x.go b/testing/internal/cpu/cpu_mips64x.go new file mode 100644 index 0000000..c452ffd --- /dev/null +++ b/testing/internal/cpu/cpu_mips64x.go @@ -0,0 +1,32 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const CacheLinePadSize = 32 + +// This is initialized by archauxv and should not be changed after it is +// initialized. +var HWCap uint + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } + + // HWCAP feature bits + MIPS64X.HasMSA = isSet(HWCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/testing/internal/cpu/cpu_mipsle.go b/testing/internal/cpu/cpu_mipsle.go new file mode 100644 index 0000000..14a9c97 --- /dev/null +++ b/testing/internal/cpu/cpu_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const CacheLinePadSize = 32 + +func doinit() { +} diff --git a/testing/internal/cpu/cpu_no_name.go b/testing/internal/cpu/cpu_no_name.go new file mode 100644 index 0000000..2adfa1b --- /dev/null +++ b/testing/internal/cpu/cpu_no_name.go @@ -0,0 +1,18 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !386 && !amd64 && !ppc64 && !ppc64le + +package cpu + +// Name returns the CPU name given by the vendor +// if it can be read directly from memory or by CPU instructions. +// If the CPU name can not be determined an empty string is returned. +// +// Implementations that use the Operating System (e.g. sysctl or /sys/) +// to gather CPU information for display should be placed in internal/sysinfo. +func Name() string { + // "A CPU has no name". + return "" +} diff --git a/testing/internal/cpu/cpu_ppc64x.go b/testing/internal/cpu/cpu_ppc64x.go new file mode 100644 index 0000000..c4a08fe --- /dev/null +++ b/testing/internal/cpu/cpu_ppc64x.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const CacheLinePadSize = 128 + +func doinit() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + {Name: "power9", Feature: &PPC64.IsPOWER9}, + } + + osinit() +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +func Name() string { + switch { + case PPC64.IsPOWER10: + return "POWER10" + case PPC64.IsPOWER9: + return "POWER9" + case PPC64.IsPOWER8: + return "POWER8" + } + return "" +} diff --git a/testing/internal/cpu/cpu_ppc64x_aix.go b/testing/internal/cpu/cpu_ppc64x_aix.go new file mode 100644 index 0000000..f05ed6f --- /dev/null +++ b/testing/internal/cpu/cpu_ppc64x_aix.go @@ -0,0 +1,25 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 + _IMPL_POWER10 = 0x40000 +) + +func osinit() { + impl := getsystemcfg(_SC_IMPL) + PPC64.IsPOWER8 = isSet(impl, _IMPL_POWER8) + PPC64.IsPOWER9 = isSet(impl, _IMPL_POWER9) + PPC64.IsPOWER10 = isSet(impl, _IMPL_POWER10) +} + +// getsystemcfg is defined in runtime/os2_aix.go +func getsystemcfg(label uint) uint diff --git a/testing/internal/cpu/cpu_ppc64x_linux.go b/testing/internal/cpu/cpu_ppc64x_linux.go new file mode 100644 index 0000000..9df82ca --- /dev/null +++ b/testing/internal/cpu/cpu_ppc64x_linux.go @@ -0,0 +1,33 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +// ppc64 doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2. +// These are initialized by archauxv and should not be changed after they are +// initialized. +var HWCap uint +var HWCap2 uint + +// HWCAP bits. These are exposed by Linux. 
+const ( + // ISA Level + hwcap2_ARCH_2_07 = 0x80000000 + hwcap2_ARCH_3_00 = 0x00800000 + hwcap2_ARCH_3_1 = 0x00040000 + + // CPU features + hwcap2_DARN = 0x00200000 + hwcap2_SCV = 0x00100000 +) + +func osinit() { + PPC64.IsPOWER8 = isSet(HWCap2, hwcap2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(HWCap2, hwcap2_ARCH_3_00) + PPC64.IsPOWER10 = isSet(HWCap2, hwcap2_ARCH_3_1) + PPC64.HasDARN = isSet(HWCap2, hwcap2_DARN) + PPC64.HasSCV = isSet(HWCap2, hwcap2_SCV) +} diff --git a/testing/internal/cpu/cpu_ppc64x_other.go b/testing/internal/cpu/cpu_ppc64x_other.go new file mode 100644 index 0000000..d5b629d --- /dev/null +++ b/testing/internal/cpu/cpu_ppc64x_other.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (ppc64 || ppc64le) && !aix && !linux + +package cpu + +func osinit() { + // Other operating systems do not support reading HWCap from auxiliary vector, + // reading privileged system registers or sysctl in user space to detect CPU + // features at runtime. +} diff --git a/testing/internal/cpu/cpu_riscv64.go b/testing/internal/cpu/cpu_riscv64.go new file mode 100644 index 0000000..2173fe8 --- /dev/null +++ b/testing/internal/cpu/cpu_riscv64.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const CacheLinePadSize = 64 + +func doinit() { +} diff --git a/testing/internal/cpu/cpu_s390x.go b/testing/internal/cpu/cpu_s390x.go new file mode 100644 index 0000000..45d8ed2 --- /dev/null +++ b/testing/internal/cpu/cpu_s390x.go @@ -0,0 +1,205 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const CacheLinePadSize = 256 + +var HWCap uint + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// function is the function code for the named function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +const ( + // KDSA function codes + ecdsaVerifyP256 function = 1 // NIST P256 + ecdsaVerifyP384 function = 2 // NIST P384 + ecdsaVerifyP521 function = 3 // NIST P521 + ecdsaSignP256 function = 9 // NIST P256 + ecdsaSignP384 function = 10 // NIST P384 + ecdsaSignP521 function = 11 // NIST P521 + eddsaVerifyEd25519 function = 32 // Curve25519 + eddsaVerifyEd448 function = 36 // Curve448 + eddsaSignEd25519 function = 40 // Curve25519 + eddsaSignEd448 function = 44 // Curve448 +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. 
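The MSB-first indexing used by `bitIsSet` above is easy to get backwards, since bit index 0 is the leftmost bit of the first doubleword. A small worked example using the KM-AES-128 function code (18) from the constants above:

```go
// setBit sets bit index i in an MSB-first bit vector, the inverse of
// the test performed by bitIsSet.
func setBit(bits []uint64, i uint) {
	bits[i/64] |= (uint64(1) << 63) >> (i % 64)
}

// Example: function code 18 (aes128) lands in word 0, at the position
// 18 places right of the MSB; after setBit(q[:], 18), bitIsSet(q[:], 18)
// reports true.
func example() bool {
	var q [2]uint64
	setBit(q[:], 18)
	return q[0]&((uint64(1)<<63)>>18) != 0 // true
}
```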
+type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vxe facility = 135 // vector-enhancements 1 + + // Note: vx requires kernel support + // and so must be fetched from HWCAP. + + hwcap_VX = 1 << 11 // vector facility +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult +func kdsaQuery() queryResult + +func doinit() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH}, + {Name: "stfle", Feature: &S390X.HasSTFLE}, + {Name: "ldisp", Feature: &S390X.HasLDISP}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "eimm", Feature: &S390X.HasEIMM}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + {Name: "kdsa", Feature: &S390X.HasKDSA}, + } + + aes := []function{aes128, aes192, aes256} + facilities := stfle() + + S390X.HasZARCH = facilities.Has(zarch) + S390X.HasSTFLE = facilities.Has(stflef) + S390X.HasLDISP = facilities.Has(ldisp) + S390X.HasEIMM = facilities.Has(eimm) + S390X.HasDFP = facilities.Has(dfp) + S390X.HasETF3EH = facilities.Has(etf3eh) + S390X.HasMSA = facilities.Has(msa) + + if S390X.HasMSA { + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+		}
+
+		// compute message digest
+		kimd := kimdQuery() // intermediate (no padding)
+		klmd := klmdQuery() // last (padding)
+		S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
+		S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
+		S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
+		S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
+		sha3 := []function{
+			sha3_224, sha3_256, sha3_384, sha3_512,
+			shake128, shake256,
+		}
+		S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
+		S390X.HasKDSA = facilities.Has(msa9) // elliptic curves
+		if S390X.HasKDSA {
+			kdsa := kdsaQuery()
+			S390X.HasECDSA = kdsa.Has(ecdsaVerifyP256, ecdsaSignP256, ecdsaVerifyP384, ecdsaSignP384, ecdsaVerifyP521, ecdsaSignP521)
+			S390X.HasEDDSA = kdsa.Has(eddsaVerifyEd25519, eddsaSignEd25519, eddsaVerifyEd448, eddsaSignEd448)
+		}
+	}
+
+	S390X.HasVX = isSet(HWCap, hwcap_VX)
+
+	if S390X.HasVX {
+		S390X.HasVXE = facilities.Has(vxe)
+	}
+}
+
+func isSet(hwc uint, value uint) bool {
+	return hwc&value != 0
+}
diff --git a/testing/internal/cpu/cpu_s390x.s b/testing/internal/cpu/cpu_s390x.s
new file mode 100644
index 0000000..488553c
--- /dev/null
+++ b/testing/internal/cpu/cpu_s390x.s
@@ -0,0 +1,62 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func stfle() facilityList
+TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
+	MOVD $ret+0(FP), R1
+	MOVD $3, R0          // last doubleword index to store
+	XC   $32, (R1), (R1) // clear 4 doublewords (32 bytes)
+	WORD $0xb2b01000     // store facility list extended (STFLE)
+	RET
+
+// func kmQuery() queryResult
TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KM-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KM   R2, R4         // cipher message (KM)
+	RET
+
+// func kmcQuery() queryResult
+TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KMC-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KMC  R2, R4         // cipher message with chaining (KMC)
+	RET
+
+// func kmctrQuery() queryResult
+TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD  $0, R0         // set function code to 0 (KMCTR-Query)
+	MOVD  $ret+0(FP), R1 // address of 16-byte return value
+	KMCTR R2, R4, R4     // cipher message with counter (KMCTR)
+	RET
+
+// func kmaQuery() queryResult
+TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KMA-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KMA  R2, R6, R4     // cipher message with authentication (KMA)
+	RET
+
+// func kimdQuery() queryResult
+TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KIMD-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KIMD R2, R4         // compute intermediate message digest (KIMD)
+	RET
+
+// func klmdQuery() queryResult
+TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KLMD-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KLMD R2, R4         // compute last message digest (KLMD)
+	RET
+
+// func kdsaQuery() queryResult
+TEXT ·kdsaQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KDSA-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	KDSA R0, R4         // compute digital signature authentication
+	RET
diff --git a/testing/internal/cpu/cpu_s390x_test.go b/testing/internal/cpu/cpu_s390x_test.go
new file mode 100644
index 0000000..97d8f87
--- /dev/null
+++
b/testing/internal/cpu/cpu_s390x_test.go @@ -0,0 +1,64 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu_test + +import ( + "errors" + "os" + "regexp" + "testing" + + . "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" +) + +func getFeatureList() ([]string, error) { + cpuinfo, err := os.ReadFile("/proc/cpuinfo") + if err != nil { + return nil, err + } + r := regexp.MustCompile("features\\s*:\\s*(.*)") + b := r.FindSubmatch(cpuinfo) + if len(b) < 2 { + return nil, errors.New("no feature list in /proc/cpuinfo") + } + return regexp.MustCompile("\\s+").Split(string(b[1]), -1), nil +} + +func TestS390XAgainstCPUInfo(t *testing.T) { + // mapping of linux feature strings to S390X fields + mapping := make(map[string]*bool) + for _, option := range Options { + mapping[option.Name] = option.Feature + } + + // these must be true on the machines Go supports + mandatory := make(map[string]bool) + mandatory["zarch"] = false + mandatory["eimm"] = false + mandatory["ldisp"] = false + mandatory["stfle"] = false + + features, err := getFeatureList() + if err != nil { + t.Error(err) + } + for _, feature := range features { + if _, ok := mandatory[feature]; ok { + mandatory[feature] = true + } + if flag, ok := mapping[feature]; ok { + if !*flag { + t.Errorf("feature '%v' not detected", feature) + } + } else { + t.Logf("no entry for '%v'", feature) + } + } + for k, v := range mandatory { + if !v { + t.Errorf("mandatory feature '%v' not detected", k) + } + } +} diff --git a/testing/internal/cpu/cpu_test.go b/testing/internal/cpu/cpu_test.go new file mode 100644 index 0000000..c6858e9 --- /dev/null +++ b/testing/internal/cpu/cpu_test.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu_test + +import ( + "os" + "os/exec" + "testing" + + . "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebug" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +func MustHaveDebugOptionsSupport(t *testing.T) { + if !DebugOptions { + t.Skipf("skipping test: cpu feature options not supported by OS") + } +} + +func MustSupportFeatureDetection(t *testing.T) { + // TODO: add platforms that do not have CPU feature detection support. 
+} + +func runDebugOptionsTest(t *testing.T, test string, options string) { + MustHaveDebugOptionsSupport(t) + + testenv.MustHaveExec(t) + + env := "GODEBUG=" + options + + cmd := exec.Command(os.Args[0], "-test.run=^"+test+"$") + cmd.Env = append(cmd.Env, env) + + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s with %s: run failed: %v output:\n%s\n", + test, env, err, string(output)) + } +} + +func TestDisableAllCapabilities(t *testing.T) { + MustSupportFeatureDetection(t) + runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "cpu.all=off") +} + +func TestAllCapabilitiesDisabled(t *testing.T) { + MustHaveDebugOptionsSupport(t) + + if godebug.New("#cpu.all").Value() != "off" { + t.Skipf("skipping test: GODEBUG=cpu.all=off not set") + } + + for _, o := range Options { + want := false + if got := *o.Feature; got != want { + t.Errorf("%v: expected %v, got %v", o.Name, want, got) + } + } +} diff --git a/testing/internal/cpu/cpu_wasm.go b/testing/internal/cpu/cpu_wasm.go new file mode 100644 index 0000000..2310ad6 --- /dev/null +++ b/testing/internal/cpu/cpu_wasm.go @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const CacheLinePadSize = 64 + +func doinit() { +} diff --git a/testing/internal/cpu/cpu_x86.go b/testing/internal/cpu/cpu_x86.go new file mode 100644 index 0000000..ee81207 --- /dev/null +++ b/testing/internal/cpu/cpu_x86.go @@ -0,0 +1,216 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 + +package cpu + +const CacheLinePadSize = 64 + +// cpuid is implemented in cpu_x86.s. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s. +func xgetbv() (eax, edx uint32) + +// getGOAMD64level is implemented in cpu_x86.s. Returns number in [1,4]. +func getGOAMD64level() int32 + +const ( + // ecx bits + cpuid_SSE3 = 1 << 0 + cpuid_PCLMULQDQ = 1 << 1 + cpuid_SSSE3 = 1 << 9 + cpuid_FMA = 1 << 12 + cpuid_SSE41 = 1 << 19 + cpuid_SSE42 = 1 << 20 + cpuid_POPCNT = 1 << 23 + cpuid_AES = 1 << 25 + cpuid_OSXSAVE = 1 << 27 + cpuid_AVX = 1 << 28 + + // ebx bits + cpuid_BMI1 = 1 << 3 + cpuid_AVX2 = 1 << 5 + cpuid_BMI2 = 1 << 8 + cpuid_ERMS = 1 << 9 + cpuid_AVX512F = 1 << 16 + cpuid_ADX = 1 << 19 + cpuid_SHA = 1 << 29 + cpuid_AVX512BW = 1 << 30 + cpuid_AVX512VL = 1 << 31 + // edx bits + cpuid_FSRM = 1 << 4 + // edx bits for CPUID 0x80000001 + cpuid_RDTSCP = 1 << 27 +) + +var maxExtendedFunctionInformation uint32 + +func doinit() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fsrm", Feature: &X86.HasFSRM}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "rdtscp", Feature: &X86.HasRDTSCP}, + {Name: "sha", Feature: &X86.HasSHA}, + } + level := getGOAMD64level() + if level < 2 { + // These options are required at level 2. At lower levels + // they can be turned off. + options = append(options, + option{Name: "popcnt", Feature: &X86.HasPOPCNT}, + option{Name: "sse3", Feature: &X86.HasSSE3}, + option{Name: "sse41", Feature: &X86.HasSSE41}, + option{Name: "sse42", Feature: &X86.HasSSE42}, + option{Name: "ssse3", Feature: &X86.HasSSSE3}) + } + if level < 3 { + // These options are required at level 3. 
At lower levels
+		// they can be turned off.
+		options = append(options,
+			option{Name: "avx", Feature: &X86.HasAVX},
+			option{Name: "avx2", Feature: &X86.HasAVX2},
+			option{Name: "bmi1", Feature: &X86.HasBMI1},
+			option{Name: "bmi2", Feature: &X86.HasBMI2},
+			option{Name: "fma", Feature: &X86.HasFMA})
+	}
+	if level < 4 {
+		// These options are required at level 4. At lower levels
+		// they can be turned off.
+		options = append(options,
+			option{Name: "avx512f", Feature: &X86.HasAVX512F},
+			option{Name: "avx512bw", Feature: &X86.HasAVX512BW},
+			option{Name: "avx512vl", Feature: &X86.HasAVX512VL},
+		)
+	}
+
+	maxID, _, _, _ := cpuid(0, 0)
+
+	if maxID < 1 {
+		return
+	}
+
+	maxExtendedFunctionInformation, _, _, _ = cpuid(0x80000000, 0)
+
+	_, _, ecx1, _ := cpuid(1, 0)
+
+	X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
+	X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
+	X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
+	X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
+	X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
+	X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
+	X86.HasAES = isSet(ecx1, cpuid_AES)
+
+	// OSXSAVE can be false when using older Operating Systems
+	// or when explicitly disabled on newer Operating Systems by
+	// e.g. setting the xsavedisable boot option on Windows 10.
+	X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
+
+	// The FMA instruction set extension only has VEX prefixed instructions.
+	// VEX prefixed instructions require OSXSAVE to be enabled.
+	// See Intel 64 and IA-32 Architecture Software Developer’s Manual Volume 2
+	// Section 2.4 "AVX and SSE Instruction Exception Specification"
+	X86.HasFMA = isSet(ecx1, cpuid_FMA) && X86.HasOSXSAVE
+
+	osSupportsAVX := false
+	osSupportsAVX512 := false
+	// For XGETBV, OSXSAVE bit is required and sufficient.
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
+
+		// AVX512 detection does not work on Darwin,
+		// see https://github.com/golang/go/issues/49233
+		//
+		// Check if opmask, ZMMhi256 and Hi16_ZMM have OS support.
+		osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
+	}
+
+	X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, _, edx7 := cpuid(7, 0)
+	X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
+	X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
+	X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
+	X86.HasERMS = isSet(ebx7, cpuid_ERMS)
+	X86.HasADX = isSet(ebx7, cpuid_ADX)
+	X86.HasSHA = isSet(ebx7, cpuid_SHA)
+
+	X86.HasAVX512F = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512
+	if X86.HasAVX512F {
+		X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+		X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
+	}
+
+	X86.HasFSRM = isSet(edx7, cpuid_FSRM)
+
+	if maxExtendedFunctionInformation < 0x80000001 {
+		return
+	}
+
+	_, _, _, edxExt1 := cpuid(0x80000001, 0)
+	X86.HasRDTSCP = isSet(edxExt1, cpuid_RDTSCP)
+}
+
+func isSet(hwc uint32, value uint32) bool {
+	return hwc&value != 0
+}
+
+// Name returns the CPU name given by the vendor.
+// If the CPU name can not be determined an
+// empty string is returned.
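The XCR0 checks above are the part most often misread: AVX needs the OS to save XMM and YMM state, and AVX-512 additionally needs opmask state and both halves of the ZMM registers. A compact restatement of the same predicate, assuming `eax` is the low word returned by `xgetbv`:

```go
// osSupport reports OS-level AVX/AVX-512 state support from XCR0 bits,
// mirroring the checks in doinit above.
func osSupport(eax uint32) (avx, avx512 bool) {
	const (
		xmm     = 1 << 1
		ymm     = 1 << 2
		opmask  = 1 << 5
		zmmHi   = 1 << 6 // upper halves of ZMM0-15
		hi16Zmm = 1 << 7 // ZMM16-31
	)
	avx = eax&(xmm|ymm) == xmm|ymm
	avx512 = avx && eax&(opmask|zmmHi|hi16Zmm) == opmask|zmmHi|hi16Zmm
	return avx, avx512
}
```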
+func Name() string { + if maxExtendedFunctionInformation < 0x80000004 { + return "" + } + + data := make([]byte, 0, 3*4*4) + + var eax, ebx, ecx, edx uint32 + eax, ebx, ecx, edx = cpuid(0x80000002, 0) + data = appendBytes(data, eax, ebx, ecx, edx) + eax, ebx, ecx, edx = cpuid(0x80000003, 0) + data = appendBytes(data, eax, ebx, ecx, edx) + eax, ebx, ecx, edx = cpuid(0x80000004, 0) + data = appendBytes(data, eax, ebx, ecx, edx) + + // Trim leading spaces. + for len(data) > 0 && data[0] == ' ' { + data = data[1:] + } + + // Trim tail after and including the first null byte. + for i, c := range data { + if c == '\x00' { + data = data[:i] + break + } + } + + return string(data) +} + +func appendBytes(b []byte, args ...uint32) []byte { + for _, arg := range args { + b = append(b, + byte((arg >> 0)), + byte((arg >> 8)), + byte((arg >> 16)), + byte((arg >> 24))) + } + return b +} diff --git a/testing/internal/cpu/cpu_x86.s b/testing/internal/cpu/cpu_x86.s new file mode 100644 index 0000000..2ee8eca --- /dev/null +++ b/testing/internal/cpu/cpu_x86.s @@ -0,0 +1,43 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 + +#include "textflag.h" + +// func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), NOSPLIT, $0-24 + MOVL eaxArg+0(FP), AX + MOVL ecxArg+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv() (eax, edx uint32) +TEXT ·xgetbv(SB),NOSPLIT,$0-8 + MOVL $0, CX + XGETBV + MOVL AX, eax+0(FP) + MOVL DX, edx+4(FP) + RET + +// func getGOAMD64level() int32 +TEXT ·getGOAMD64level(SB),NOSPLIT,$0-4 +#ifdef GOAMD64_v4 + MOVL $4, ret+0(FP) +#else +#ifdef GOAMD64_v3 + MOVL $3, ret+0(FP) +#else +#ifdef GOAMD64_v2 + MOVL $2, ret+0(FP) +#else + MOVL $1, ret+0(FP) +#endif +#endif +#endif + RET diff --git a/testing/internal/cpu/cpu_x86_test.go b/testing/internal/cpu/cpu_x86_test.go new file mode 100644 index 0000000..8bd9f5c --- /dev/null +++ b/testing/internal/cpu/cpu_x86_test.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 + +package cpu_test + +import ( + "testing" + + . 
"github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebug" +) + +func TestX86ifAVX2hasAVX(t *testing.T) { + if X86.HasAVX2 && !X86.HasAVX { + t.Fatalf("HasAVX expected true when HasAVX2 is true, got false") + } +} + +func TestX86ifAVX512FhasAVX2(t *testing.T) { + if X86.HasAVX512F && !X86.HasAVX2 { + t.Fatalf("HasAVX2 expected true when HasAVX512F is true, got false") + } +} + +func TestX86ifAVX512BWhasAVX512F(t *testing.T) { + if X86.HasAVX512BW && !X86.HasAVX512F { + t.Fatalf("HasAVX512F expected true when HasAVX512BW is true, got false") + } +} + +func TestX86ifAVX512VLhasAVX512F(t *testing.T) { + if X86.HasAVX512VL && !X86.HasAVX512F { + t.Fatalf("HasAVX512F expected true when HasAVX512VL is true, got false") + } +} + +func TestDisableSSE3(t *testing.T) { + if GetGOAMD64level() > 1 { + t.Skip("skipping test: can't run on GOAMD64>v1 machines") + } + runDebugOptionsTest(t, "TestSSE3DebugOption", "cpu.sse3=off") +} + +func TestSSE3DebugOption(t *testing.T) { + MustHaveDebugOptionsSupport(t) + + if godebug.New("#cpu.sse3").Value() != "off" { + t.Skipf("skipping test: GODEBUG=cpu.sse3=off not set") + } + + want := false + if got := X86.HasSSE3; got != want { + t.Errorf("X86.HasSSE3 expected %v, got %v", want, got) + } +} diff --git a/testing/internal/cpu/export_test.go b/testing/internal/cpu/export_test.go new file mode 100644 index 0000000..91bfc1b --- /dev/null +++ b/testing/internal/cpu/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +var ( + Options = options +) diff --git a/testing/internal/cpu/export_x86_test.go b/testing/internal/cpu/export_x86_test.go new file mode 100644 index 0000000..a12b6f2 --- /dev/null +++ b/testing/internal/cpu/export_x86_test.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 + +package cpu + +var ( + GetGOAMD64level = getGOAMD64level +) diff --git a/testing/internal/diff/diff.go b/testing/internal/diff/diff.go new file mode 100644 index 0000000..6a40b23 --- /dev/null +++ b/testing/internal/diff/diff.go @@ -0,0 +1,261 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// A pair is a pair of values tracked for both the x and y side of a diff. +// It is typically a pair of line indexes. +type pair struct{ x, y int } + +// Diff returns an anchored diff of the two texts old and new +// in the “unified diff” format. If old and new are identical, +// Diff returns a nil slice (no output). +// +// Unix diff implementations typically look for a diff with +// the smallest number of lines inserted and removed, +// which can in the worst case take time quadratic in the +// number of lines in the texts. As a result, many implementations +// either can be made to run for a long time or cut off the search +// after a predetermined amount of work. +// +// In contrast, this implementation looks for a diff with the +// smallest number of “unique” lines inserted and removed, +// where unique means a line that appears just once in both old and new. 
+// We call this an “anchored diff” because the unique lines anchor +// the chosen matching regions. An anchored diff is usually clearer +// than a standard diff, because the algorithm does not try to +// reuse unrelated blank lines or closing braces. +// The algorithm also guarantees to run in O(n log n) time +// instead of the standard O(n²) time. +// +// Some systems call this approach a “patience diff,” named for +// the “patience sorting” algorithm, itself named for a solitaire card game. +// We avoid that name for two reasons. First, the name has been used +// for a few different variants of the algorithm, so it is imprecise. +// Second, the name is frequently interpreted as meaning that you have +// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, +// when in fact the algorithm is faster than the standard one. +func Diff(oldName string, old []byte, newName string, new []byte) []byte { + if bytes.Equal(old, new) { + return nil + } + x := lines(old) + y := lines(new) + + // Print diff header. + var out bytes.Buffer + fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) + fmt.Fprintf(&out, "--- %s\n", oldName) + fmt.Fprintf(&out, "+++ %s\n", newName) + + // Loop over matches to consider, + // expanding each match to include surrounding lines, + // and then printing diff chunks. + // To avoid setup/teardown cases outside the loop, + // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair + // in the sequence of matches. + var ( + done pair // printed up to x[:done.x] and y[:done.y] + chunk pair // start lines of current chunk + count pair // number of lines from each side in current chunk + ctext []string // lines for current chunk + ) + for _, m := range tgs(x, y) { + if m.x < done.x { + // Already handled scanning forward from earlier match. + continue + } + + // Expand matching lines as far as possible, + // establishing that x[start.x:end.x] == y[start.y:end.y]. + // Note that on the first (or last) iteration we may (or definitely do) + // have an empty match: start.x==end.x and start.y==end.y. + start := m + for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { + start.x-- + start.y-- + } + end := m + for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { + end.x++ + end.y++ + } + + // Emit the mismatched lines before start into this chunk. + // (No effect on first sentinel iteration, when start = {0,0}.) + for _, s := range x[done.x:start.x] { + ctext = append(ctext, "-"+s) + count.x++ + } + for _, s := range y[done.y:start.y] { + ctext = append(ctext, "+"+s) + count.y++ + } + + // If we're not at EOF and have too few common lines, + // the chunk includes all the common lines and continues. + const C = 3 // number of context lines + if (end.x < len(x) || end.y < len(y)) && + (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { + for _, s := range x[start.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + continue + } + + // End chunk with common lines for context. + if len(ctext) > 0 { + n := end.x - start.x + if n > C { + n = C + } + for _, s := range x[start.x : start.x+n] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = pair{start.x + n, start.y + n} + + // Format and emit chunk. + // Convert line numbers to 1-indexed. + // Special case: empty file shows up as 0,0 not 1,0. 
+ if count.x > 0 { + chunk.x++ + } + if count.y > 0 { + chunk.y++ + } + fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) + for _, s := range ctext { + out.WriteString(s) + } + count.x = 0 + count.y = 0 + ctext = ctext[:0] + } + + // If we reached EOF, we're done. + if end.x >= len(x) && end.y >= len(y) { + break + } + + // Otherwise start a new chunk. + chunk = pair{end.x - C, end.y - C} + for _, s := range x[chunk.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + } + + return out.Bytes() +} + +// lines returns the lines in the file x, including newlines. +// If the file does not end in a newline, one is supplied +// along with a warning about the missing newline. +func lines(x []byte) []string { + l := strings.SplitAfter(string(x), "\n") + if l[len(l)-1] == "" { + l = l[:len(l)-1] + } else { + // Treat last line as having a message about the missing newline attached, + // using the same text as BSD/GNU diff (including the leading backslash). + l[len(l)-1] += "\n\\ No newline at end of file\n" + } + return l +} + +// tgs returns the pairs of indexes of the longest common subsequence +// of unique lines in x and y, where a unique line is one that appears +// once in x and once in y. +// +// The longest common subsequence algorithm is as described in +// Thomas G. Szymanski, “A Special Case of the Maximal Common +// Subsequence Problem,” Princeton TR #170 (January 1975), +// available at https://research.swtch.com/tgs170.pdf. +func tgs(x, y []string) []pair { + // Count the number of times each string appears in a and b. + // We only care about 0, 1, many, counted as 0, -1, -2 + // for the x side and 0, -4, -8 for the y side. + // Using negative numbers now lets us distinguish positive line numbers later. + m := make(map[string]int) + for _, s := range x { + if c := m[s]; c > -2 { + m[s] = c - 1 + } + } + for _, s := range y { + if c := m[s]; c > -8 { + m[s] = c - 4 + } + } + + // Now unique strings can be identified by m[s] = -1+-4. + // + // Gather the indexes of those strings in x and y, building: + // xi[i] = increasing indexes of unique strings in x. + // yi[i] = increasing indexes of unique strings in y. + // inv[i] = index j such that x[xi[i]] = y[yi[j]]. + var xi, yi, inv []int + for i, s := range y { + if m[s] == -1+-4 { + m[s] = len(yi) + yi = append(yi, i) + } + } + for i, s := range x { + if j, ok := m[s]; ok && j >= 0 { + xi = append(xi, i) + inv = append(inv, j) + } + } + + // Apply Algorithm A from Szymanski's paper. + // In those terms, A = J = inv and B = [0, n). + // We add sentinel pairs {0,0}, and {len(x),len(y)} + // to the returned sequence, to help the processing loop. + J := inv + n := len(xi) + T := make([]int, n) + L := make([]int, n) + for i := range T { + T[i] = n + 1 + } + for i := 0; i < n; i++ { + k := sort.Search(n, func(k int) bool { + return T[k] >= J[i] + }) + T[k] = J[i] + L[i] = k + 1 + } + k := 0 + for _, v := range L { + if k < v { + k = v + } + } + seq := make([]pair, 2+k) + seq[1+k] = pair{len(x), len(y)} // sentinel at end + lastj := n + for i := n - 1; i >= 0; i-- { + if L[i] == k && J[i] < lastj { + seq[k] = pair{xi[i], yi[J[i]]} + k-- + } + } + seq[0] = pair{0, 0} // sentinel at start + return seq +} diff --git a/testing/internal/diff/diff_test.go b/testing/internal/diff/diff_test.go new file mode 100644 index 0000000..e8b1ac0 --- /dev/null +++ b/testing/internal/diff/diff_test.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
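Since `Diff` is a plain function over byte slices, it is easy to exercise directly. A minimal usage sketch; note the import path assumes this repository's layout, and `internal` packages are only importable from code inside this module:

```go
package main

import (
	"fmt"

	"github.com/CodSpeedHQ/codspeed-go/testing/internal/diff"
)

func main() {
	a := []byte("a\nb\nc\n")
	b := []byte("a\nB\nc\n")
	// Identical inputs return nil; otherwise a unified diff is produced
	// whose header lines use the names given here ("old", "new").
	fmt.Printf("%s", diff.Diff("old", a, "new", b))
}
```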
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "path/filepath" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/txtar" +) + +func clean(text []byte) []byte { + text = bytes.ReplaceAll(text, []byte("$\n"), []byte("\n")) + text = bytes.TrimSuffix(text, []byte("^D\n")) + return text +} + +func Test(t *testing.T) { + files, _ := filepath.Glob("testdata/*.txt") + if len(files) == 0 { + t.Fatalf("no testdata") + } + + for _, file := range files { + t.Run(filepath.Base(file), func(t *testing.T) { + a, err := txtar.ParseFile(file) + if err != nil { + t.Fatal(err) + } + if len(a.Files) != 3 || a.Files[2].Name != "diff" { + t.Fatalf("%s: want three files, third named \"diff\"", file) + } + diffs := Diff(a.Files[0].Name, clean(a.Files[0].Data), a.Files[1].Name, clean(a.Files[1].Data)) + want := clean(a.Files[2].Data) + if !bytes.Equal(diffs, want) { + t.Fatalf("%s: have:\n%s\nwant:\n%s\n%s", file, + diffs, want, Diff("have", diffs, "want", want)) + } + }) + } +} diff --git a/testing/internal/diff/testdata/allnew.txt b/testing/internal/diff/testdata/allnew.txt new file mode 100644 index 0000000..8875649 --- /dev/null +++ b/testing/internal/diff/testdata/allnew.txt @@ -0,0 +1,13 @@ +-- old -- +-- new -- +a +b +c +-- diff -- +diff old new +--- old ++++ new +@@ -0,0 +1,3 @@ ++a ++b ++c diff --git a/testing/internal/diff/testdata/allold.txt b/testing/internal/diff/testdata/allold.txt new file mode 100644 index 0000000..bcc9ac0 --- /dev/null +++ b/testing/internal/diff/testdata/allold.txt @@ -0,0 +1,13 @@ +-- old -- +a +b +c +-- new -- +-- diff -- +diff old new +--- old ++++ new +@@ -1,3 +0,0 @@ +-a +-b +-c diff --git a/testing/internal/diff/testdata/basic.txt b/testing/internal/diff/testdata/basic.txt new file mode 100644 index 0000000..d2565b5 --- /dev/null +++ b/testing/internal/diff/testdata/basic.txt @@ -0,0 +1,35 @@ +Example from Hunt and McIlroy, “An Algorithm for Differential File Comparison.” +https://www.cs.dartmouth.edu/~doug/diff.pdf + +-- old -- +a +b +c +d +e +f +g +-- new -- +w +a +b +x +y +z +e +-- diff -- +diff old new +--- old ++++ new +@@ -1,7 +1,7 @@ ++w + a + b +-c +-d ++x ++y ++z + e +-f +-g diff --git a/testing/internal/diff/testdata/dups.txt b/testing/internal/diff/testdata/dups.txt new file mode 100644 index 0000000..d10524d --- /dev/null +++ b/testing/internal/diff/testdata/dups.txt @@ -0,0 +1,40 @@ +-- old -- +a + +b + +c + +d + +e + +f +-- new -- +a + +B + +C + +d + +e + +f +-- diff -- +diff old new +--- old ++++ new +@@ -1,8 +1,8 @@ + a + $ +-b +- +-c ++B ++ ++C + $ + d + $ diff --git a/testing/internal/diff/testdata/end.txt b/testing/internal/diff/testdata/end.txt new file mode 100644 index 0000000..158637c --- /dev/null +++ b/testing/internal/diff/testdata/end.txt @@ -0,0 +1,38 @@ +-- old -- +1 +2 +3 +4 +5 +6 +7 +eight +nine +ten +eleven +-- new -- +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +-- diff -- +diff old new +--- old ++++ new +@@ -5,7 +5,6 @@ + 5 + 6 + 7 +-eight +-nine +-ten +-eleven ++8 ++9 ++10 diff --git a/testing/internal/diff/testdata/eof.txt b/testing/internal/diff/testdata/eof.txt new file mode 100644 index 0000000..5dc145c --- /dev/null +++ b/testing/internal/diff/testdata/eof.txt @@ -0,0 +1,9 @@ +-- old -- +a +b +c^D +-- new -- +a +b +c^D +-- diff -- diff --git a/testing/internal/diff/testdata/eof1.txt b/testing/internal/diff/testdata/eof1.txt new file mode 100644 index 0000000..1ebf621 --- /dev/null +++ b/testing/internal/diff/testdata/eof1.txt @@ -0,0 
+1,18 @@ +-- old -- +a +b +c +-- new -- +a +b +c^D +-- diff -- +diff old new +--- old ++++ new +@@ -1,3 +1,3 @@ + a + b +-c ++c +\ No newline at end of file diff --git a/testing/internal/diff/testdata/eof2.txt b/testing/internal/diff/testdata/eof2.txt new file mode 100644 index 0000000..047705e --- /dev/null +++ b/testing/internal/diff/testdata/eof2.txt @@ -0,0 +1,18 @@ +-- old -- +a +b +c^D +-- new -- +a +b +c +-- diff -- +diff old new +--- old ++++ new +@@ -1,3 +1,3 @@ + a + b +-c +\ No newline at end of file ++c diff --git a/testing/internal/diff/testdata/long.txt b/testing/internal/diff/testdata/long.txt new file mode 100644 index 0000000..3fc99f7 --- /dev/null +++ b/testing/internal/diff/testdata/long.txt @@ -0,0 +1,62 @@ +-- old -- +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +14½ +15 +16 +17 +18 +19 +20 +-- new -- +1 +2 +3 +4 +5 +6 +8 +9 +10 +11 +12 +13 +14 +17 +18 +19 +20 +-- diff -- +diff old new +--- old ++++ new +@@ -4,7 +4,6 @@ + 4 + 5 + 6 +-7 + 8 + 9 + 10 +@@ -12,9 +11,6 @@ + 12 + 13 + 14 +-14½ +-15 +-16 + 17 + 18 + 19 diff --git a/testing/internal/diff/testdata/same.txt b/testing/internal/diff/testdata/same.txt new file mode 100644 index 0000000..86b1100 --- /dev/null +++ b/testing/internal/diff/testdata/same.txt @@ -0,0 +1,5 @@ +-- old -- +hello world +-- new -- +hello world +-- diff -- diff --git a/testing/internal/diff/testdata/start.txt b/testing/internal/diff/testdata/start.txt new file mode 100644 index 0000000..217b2fd --- /dev/null +++ b/testing/internal/diff/testdata/start.txt @@ -0,0 +1,34 @@ +-- old -- +e +pi +4 +5 +6 +7 +8 +9 +10 +-- new -- +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +-- diff -- +diff old new +--- old ++++ new +@@ -1,5 +1,6 @@ +-e +-pi ++1 ++2 ++3 + 4 + 5 + 6 diff --git a/testing/internal/diff/testdata/triv.txt b/testing/internal/diff/testdata/triv.txt new file mode 100644 index 0000000..ab5759f --- /dev/null +++ b/testing/internal/diff/testdata/triv.txt @@ -0,0 +1,40 @@ +Another example from Hunt and McIlroy, +“An Algorithm for Differential File Comparison.” +https://www.cs.dartmouth.edu/~doug/diff.pdf + +Anchored diff gives up on finding anything, +since there are no unique lines. + +-- old -- +a +b +c +a +b +b +a +-- new -- +c +a +b +a +b +c +-- diff -- +diff old new +--- old ++++ new +@@ -1,7 +1,6 @@ +-a +-b +-c +-a +-b +-b +-a ++c ++a ++b ++a ++b ++c diff --git a/testing/internal/fuzz/counters_supported.go b/testing/internal/fuzz/counters_supported.go new file mode 100644 index 0000000..79e27d2 --- /dev/null +++ b/testing/internal/fuzz/counters_supported.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || linux || windows || freebsd) && (amd64 || arm64) + +package fuzz + +import ( + "unsafe" +) + +// coverage returns a []byte containing unique 8-bit counters for each edge of +// the instrumented source code. This coverage data will only be generated if +// `-d=libfuzzer` is set at build time. This can be used to understand the code +// coverage of a test execution. 
+func coverage() []byte { + addr := unsafe.Pointer(&_counters) + size := uintptr(unsafe.Pointer(&_ecounters)) - uintptr(addr) + return unsafe.Slice((*byte)(addr), int(size)) +} diff --git a/testing/internal/fuzz/counters_unsupported.go b/testing/internal/fuzz/counters_unsupported.go new file mode 100644 index 0000000..287bb4b --- /dev/null +++ b/testing/internal/fuzz/counters_unsupported.go @@ -0,0 +1,24 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: expand the set of supported platforms, with testing. Nothing about +// the instrumentation is OS specific, but only amd64 and arm64 are +// supported in the runtime. See src/runtime/libfuzzer*. +// +// If you update this constraint, also update internal/platform.FuzzInstrumented. +// +//go:build !((darwin || linux || windows || freebsd) && (amd64 || arm64)) + +package fuzz + +// TODO(#48504): re-enable on platforms where instrumentation works. +// In theory, we shouldn't need this file at all: if the binary was built +// without coverage, then _counters and _ecounters should have the same address. +// However, this caused an init failure on aix/ppc64, so it's disabled here. + +// coverage returns a []byte containing unique 8-bit counters for each edge of +// the instrumented source code. This coverage data will only be generated if +// `-d=libfuzzer` is set at build time. This can be used to understand the code +// coverage of a test execution. +func coverage() []byte { return nil } diff --git a/testing/internal/fuzz/coverage.go b/testing/internal/fuzz/coverage.go new file mode 100644 index 0000000..e214a7b --- /dev/null +++ b/testing/internal/fuzz/coverage.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "fmt" + "math/bits" +) + +// ResetCoverage sets all of the counters for each edge of the instrumented +// source code to 0. +func ResetCoverage() { + cov := coverage() + clear(cov) +} + +// SnapshotCoverage copies the current counter values into coverageSnapshot, +// preserving them for later inspection. SnapshotCoverage also rounds each +// counter down to the nearest power of two. This lets the coordinator store +// multiple values for each counter by OR'ing them together. +func SnapshotCoverage() { + cov := coverage() + for i, b := range cov { + b |= b >> 1 + b |= b >> 2 + b |= b >> 4 + b -= b >> 1 + coverageSnapshot[i] = b + } +} + +// diffCoverage returns a set of bits set in snapshot but not in base. +// If there are no new bits set, diffCoverage returns nil. +func diffCoverage(base, snapshot []byte) []byte { + if len(base) != len(snapshot) { + panic(fmt.Sprintf("the number of coverage bits changed: before=%d, after=%d", len(base), len(snapshot))) + } + found := false + for i := range snapshot { + if snapshot[i]&^base[i] != 0 { + found = true + break + } + } + if !found { + return nil + } + diff := make([]byte, len(snapshot)) + for i := range diff { + diff[i] = snapshot[i] &^ base[i] + } + return diff +} + +// countNewCoverageBits returns the number of bits set in snapshot that are not +// set in base. +func countNewCoverageBits(base, snapshot []byte) int { + n := 0 + for i := range snapshot { + n += bits.OnesCount8(snapshot[i] &^ base[i]) + } + return n +} + +// isCoverageSubset returns true if all the base coverage bits are set in +// snapshot. 
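The bit trick in `SnapshotCoverage` rounds each 8-bit counter down to a power of two, so OR-ing snapshots together keeps a log-scale record of how often each edge fired. Traced on one value as a sanity check:

```go
// roundDownPow2 repeats the per-byte logic of SnapshotCoverage.
// For b = 0x6a (0b0110_1010): the shifts smear the highest set bit
// rightward, giving 0x7f; b - b>>1 then leaves 0x40, the largest
// power of two not exceeding the input. Zero stays zero.
func roundDownPow2(b byte) byte {
	b |= b >> 1
	b |= b >> 2
	b |= b >> 4
	b -= b >> 1
	return b
}
```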
+func isCoverageSubset(base, snapshot []byte) bool { + for i, v := range base { + if v&snapshot[i] != v { + return false + } + } + return true +} + +// hasCoverageBit returns true if snapshot has at least one bit set that is +// also set in base. +func hasCoverageBit(base, snapshot []byte) bool { + for i := range snapshot { + if snapshot[i]&base[i] != 0 { + return true + } + } + return false +} + +func countBits(cov []byte) int { + n := 0 + for _, c := range cov { + n += bits.OnesCount8(c) + } + return n +} + +var ( + coverageEnabled = len(coverage()) > 0 + coverageSnapshot = make([]byte, len(coverage())) + + // _counters and _ecounters mark the start and end, respectively, of where + // the 8-bit coverage counters reside in memory. They're known to cmd/link, + // which specially assigns their addresses for this purpose. + _counters, _ecounters [0]byte +) diff --git a/testing/internal/fuzz/encoding.go b/testing/internal/fuzz/encoding.go new file mode 100644 index 0000000..270ef7a --- /dev/null +++ b/testing/internal/fuzz/encoding.go @@ -0,0 +1,361 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +// encVersion1 will be the first line of a file with version 1 encoding. +var encVersion1 = "go test fuzz v1" + +// marshalCorpusFile encodes an arbitrary number of arguments into the file format for the +// corpus. +func marshalCorpusFile(vals ...any) []byte { + if len(vals) == 0 { + panic("must have at least one value to marshal") + } + b := bytes.NewBuffer([]byte(encVersion1 + "\n")) + // TODO(katiehockman): keep uint8 and int32 encoding where applicable, + // instead of changing to byte and rune respectively. + for _, val := range vals { + switch t := val.(type) { + case int, int8, int16, int64, uint, uint16, uint32, uint64, bool: + fmt.Fprintf(b, "%T(%v)\n", t, t) + case float32: + if math.IsNaN(float64(t)) && math.Float32bits(t) != math.Float32bits(float32(math.NaN())) { + // We encode unusual NaNs as hex values, because that is how users are + // likely to encounter them in literature about floating-point encoding. + // This allows us to reproduce fuzz failures that depend on the specific + // NaN representation (for float32 there are about 2^24 possibilities!), + // not just the fact that the value is *a* NaN. + // + // Note that the specific value of float32(math.NaN()) can vary based on + // whether the architecture represents signaling NaNs using a low bit + // (as is common) or a high bit (as commonly implemented on MIPS + // hardware before around 2012). We believe that the increase in clarity + // from identifying "NaN" with math.NaN() is worth the slight ambiguity + // from a platform-dependent value. + fmt.Fprintf(b, "math.Float32frombits(0x%x)\n", math.Float32bits(t)) + } else { + // We encode all other values — including the NaN value that is + // bitwise-identical to float32(math.Nan()) — using the default + // formatting, which is equivalent to strconv.FormatFloat with format + // 'g' and can be parsed by strconv.ParseFloat. + // + // For an ordinary floating-point number this format includes + // sufficiently many digits to reconstruct the exact value. For positive + // or negative infinity it is the string "+Inf" or "-Inf". For positive + // or negative zero it is "0" or "-0". For NaN, it is the string "NaN". 
+ fmt.Fprintf(b, "%T(%v)\n", t, t) + } + case float64: + if math.IsNaN(t) && math.Float64bits(t) != math.Float64bits(math.NaN()) { + fmt.Fprintf(b, "math.Float64frombits(0x%x)\n", math.Float64bits(t)) + } else { + fmt.Fprintf(b, "%T(%v)\n", t, t) + } + case string: + fmt.Fprintf(b, "string(%q)\n", t) + case rune: // int32 + // Although rune and int32 are represented by the same type, only a subset + // of valid int32 values can be expressed as rune literals. Notably, + // negative numbers, surrogate halves, and values above unicode.MaxRune + // have no quoted representation. + // + // fmt with "%q" (and the corresponding functions in the strconv package) + // would quote out-of-range values to the Unicode replacement character + // instead of the original value (see https://go.dev/issue/51526), so + // they must be treated as int32 instead. + // + // We arbitrarily draw the line at UTF-8 validity, which biases toward the + // "rune" interpretation. (However, we accept either format as input.) + if utf8.ValidRune(t) { + fmt.Fprintf(b, "rune(%q)\n", t) + } else { + fmt.Fprintf(b, "int32(%v)\n", t) + } + case byte: // uint8 + // For bytes, we arbitrarily prefer the character interpretation. + // (Every byte has a valid character encoding.) + fmt.Fprintf(b, "byte(%q)\n", t) + case []byte: // []uint8 + fmt.Fprintf(b, "[]byte(%q)\n", t) + default: + panic(fmt.Sprintf("unsupported type: %T", t)) + } + } + return b.Bytes() +} + +// unmarshalCorpusFile decodes corpus bytes into their respective values. +func unmarshalCorpusFile(b []byte) ([]any, error) { + if len(b) == 0 { + return nil, fmt.Errorf("cannot unmarshal empty string") + } + lines := bytes.Split(b, []byte("\n")) + if len(lines) < 2 { + return nil, fmt.Errorf("must include version and at least one value") + } + version := strings.TrimSuffix(string(lines[0]), "\r") + if version != encVersion1 { + return nil, fmt.Errorf("unknown encoding version: %s", version) + } + var vals []any + for _, line := range lines[1:] { + line = bytes.TrimSpace(line) + if len(line) == 0 { + continue + } + v, err := parseCorpusValue(line) + if err != nil { + return nil, fmt.Errorf("malformed line %q: %v", line, err) + } + vals = append(vals, v) + } + return vals, nil +} + +func parseCorpusValue(line []byte) (any, error) { + fs := token.NewFileSet() + expr, err := parser.ParseExprFrom(fs, "(test)", line, 0) + if err != nil { + return nil, err + } + call, ok := expr.(*ast.CallExpr) + if !ok { + return nil, fmt.Errorf("expected call expression") + } + if len(call.Args) != 1 { + return nil, fmt.Errorf("expected call expression with 1 argument; got %d", len(call.Args)) + } + arg := call.Args[0] + + if arrayType, ok := call.Fun.(*ast.ArrayType); ok { + if arrayType.Len != nil { + return nil, fmt.Errorf("expected []byte or primitive type") + } + elt, ok := arrayType.Elt.(*ast.Ident) + if !ok || elt.Name != "byte" { + return nil, fmt.Errorf("expected []byte") + } + lit, ok := arg.(*ast.BasicLit) + if !ok || lit.Kind != token.STRING { + return nil, fmt.Errorf("string literal required for type []byte") + } + s, err := strconv.Unquote(lit.Value) + if err != nil { + return nil, err + } + return []byte(s), nil + } + + var idType *ast.Ident + if selector, ok := call.Fun.(*ast.SelectorExpr); ok { + xIdent, ok := selector.X.(*ast.Ident) + if !ok || xIdent.Name != "math" { + return nil, fmt.Errorf("invalid selector type") + } + switch selector.Sel.Name { + case "Float64frombits": + idType = &ast.Ident{Name: "float64-bits"} + case "Float32frombits": + idType = &ast.Ident{Name: 
"float32-bits"} + default: + return nil, fmt.Errorf("invalid selector type") + } + } else { + idType, ok = call.Fun.(*ast.Ident) + if !ok { + return nil, fmt.Errorf("expected []byte or primitive type") + } + if idType.Name == "bool" { + id, ok := arg.(*ast.Ident) + if !ok { + return nil, fmt.Errorf("malformed bool") + } + if id.Name == "true" { + return true, nil + } else if id.Name == "false" { + return false, nil + } else { + return nil, fmt.Errorf("true or false required for type bool") + } + } + } + + var ( + val string + kind token.Token + ) + if op, ok := arg.(*ast.UnaryExpr); ok { + switch lit := op.X.(type) { + case *ast.BasicLit: + if op.Op != token.SUB { + return nil, fmt.Errorf("unsupported operation on int/float: %v", op.Op) + } + // Special case for negative numbers. + val = op.Op.String() + lit.Value // e.g. "-" + "124" + kind = lit.Kind + case *ast.Ident: + if lit.Name != "Inf" { + return nil, fmt.Errorf("expected operation on int or float type") + } + if op.Op == token.SUB { + val = "-Inf" + } else { + val = "+Inf" + } + kind = token.FLOAT + default: + return nil, fmt.Errorf("expected operation on int or float type") + } + } else { + switch lit := arg.(type) { + case *ast.BasicLit: + val, kind = lit.Value, lit.Kind + case *ast.Ident: + if lit.Name != "NaN" { + return nil, fmt.Errorf("literal value required for primitive type") + } + val, kind = "NaN", token.FLOAT + default: + return nil, fmt.Errorf("literal value required for primitive type") + } + } + + switch typ := idType.Name; typ { + case "string": + if kind != token.STRING { + return nil, fmt.Errorf("string literal value required for type string") + } + return strconv.Unquote(val) + case "byte", "rune": + if kind == token.INT { + switch typ { + case "rune": + return parseInt(val, typ) + case "byte": + return parseUint(val, typ) + } + } + if kind != token.CHAR { + return nil, fmt.Errorf("character literal required for byte/rune types") + } + n := len(val) + if n < 2 { + return nil, fmt.Errorf("malformed character literal, missing single quotes") + } + code, _, _, err := strconv.UnquoteChar(val[1:n-1], '\'') + if err != nil { + return nil, err + } + if typ == "rune" { + return code, nil + } + if code >= 256 { + return nil, fmt.Errorf("can only encode single byte to a byte type") + } + return byte(code), nil + case "int", "int8", "int16", "int32", "int64": + if kind != token.INT { + return nil, fmt.Errorf("integer literal required for int types") + } + return parseInt(val, typ) + case "uint", "uint8", "uint16", "uint32", "uint64": + if kind != token.INT { + return nil, fmt.Errorf("integer literal required for uint types") + } + return parseUint(val, typ) + case "float32": + if kind != token.FLOAT && kind != token.INT { + return nil, fmt.Errorf("float or integer literal required for float32 type") + } + v, err := strconv.ParseFloat(val, 32) + return float32(v), err + case "float64": + if kind != token.FLOAT && kind != token.INT { + return nil, fmt.Errorf("float or integer literal required for float64 type") + } + return strconv.ParseFloat(val, 64) + case "float32-bits": + if kind != token.INT { + return nil, fmt.Errorf("integer literal required for math.Float32frombits type") + } + bits, err := parseUint(val, "uint32") + if err != nil { + return nil, err + } + return math.Float32frombits(bits.(uint32)), nil + case "float64-bits": + if kind != token.FLOAT && kind != token.INT { + return nil, fmt.Errorf("integer literal required for math.Float64frombits type") + } + bits, err := parseUint(val, "uint64") + if err != nil { + 
return nil, err + } + return math.Float64frombits(bits.(uint64)), nil + default: + return nil, fmt.Errorf("expected []byte or primitive type") + } +} + +// parseInt returns an integer of value val and type typ. +func parseInt(val, typ string) (any, error) { + switch typ { + case "int": + // The int type may be either 32 or 64 bits. If 32, the fuzz tests in the + // corpus may include 64-bit values produced by fuzzing runs on 64-bit + // architectures. When running those tests, we implicitly wrap the values to + // fit in a regular int. (The test case is still “interesting”, even if the + // specific values of its inputs are platform-dependent.) + i, err := strconv.ParseInt(val, 0, 64) + return int(i), err + case "int8": + i, err := strconv.ParseInt(val, 0, 8) + return int8(i), err + case "int16": + i, err := strconv.ParseInt(val, 0, 16) + return int16(i), err + case "int32", "rune": + i, err := strconv.ParseInt(val, 0, 32) + return int32(i), err + case "int64": + return strconv.ParseInt(val, 0, 64) + default: + panic("unreachable") + } +} + +// parseUint returns an unsigned integer of value val and type typ. +func parseUint(val, typ string) (any, error) { + switch typ { + case "uint": + i, err := strconv.ParseUint(val, 0, 64) + return uint(i), err + case "uint8", "byte": + i, err := strconv.ParseUint(val, 0, 8) + return uint8(i), err + case "uint16": + i, err := strconv.ParseUint(val, 0, 16) + return uint16(i), err + case "uint32": + i, err := strconv.ParseUint(val, 0, 32) + return uint32(i), err + case "uint64": + return strconv.ParseUint(val, 0, 64) + default: + panic("unreachable") + } +} diff --git a/testing/internal/fuzz/encoding_test.go b/testing/internal/fuzz/encoding_test.go new file mode 100644 index 0000000..2971794 --- /dev/null +++ b/testing/internal/fuzz/encoding_test.go @@ -0,0 +1,406 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package fuzz
+
+import (
+ "math"
+ "strconv"
+ "testing"
+ "unicode"
+)
+
+func TestUnmarshalMarshal(t *testing.T) {
+ var tests = []struct {
+ desc string
+ in string
+ reject bool
+ want string // if different from in
+ }{
+ {
+ desc: "missing version",
+ in: "int(1234)",
+ reject: true,
+ },
+ {
+ desc: "malformed string",
+ in: `go test fuzz v1
+string("a"bcad")`,
+ reject: true,
+ },
+ {
+ desc: "empty value",
+ in: `go test fuzz v1
+int()`,
+ reject: true,
+ },
+ {
+ desc: "negative uint",
+ in: `go test fuzz v1
+uint(-32)`,
+ reject: true,
+ },
+ {
+ desc: "int8 too large",
+ in: `go test fuzz v1
+int8(1234456)`,
+ reject: true,
+ },
+ {
+ desc: "multiplication in int value",
+ in: `go test fuzz v1
+int(20*5)`,
+ reject: true,
+ },
+ {
+ desc: "double negation",
+ in: `go test fuzz v1
+int(--5)`,
+ reject: true,
+ },
+ {
+ desc: "malformed bool",
+ in: `go test fuzz v1
+bool(0)`,
+ reject: true,
+ },
+ {
+ desc: "malformed byte",
+ in: `go test fuzz v1
+byte('aa)`,
+ reject: true,
+ },
+ {
+ desc: "byte out of range",
+ in: `go test fuzz v1
+byte('☃')`,
+ reject: true,
+ },
+ {
+ desc: "extra newline",
+ in: `go test fuzz v1
+string("has extra newline")
+`,
+ want: `go test fuzz v1
+string("has extra newline")`,
+ },
+ {
+ desc: "trailing spaces",
+ in: `go test fuzz v1
+string("extra")
+[]byte("spacing")
+ `,
+ want: `go test fuzz v1
+string("extra")
+[]byte("spacing")`,
+ },
+ {
+ desc: "float types",
+ in: `go test fuzz v1
+float64(0)
+float32(0)`,
+ },
+ {
+ desc: "various types",
+ in: `go test fuzz v1
+int(-23)
+int8(-2)
+int64(2342425)
+uint(1)
+uint16(234)
+uint32(352342)
+uint64(123)
+rune('œ')
+byte('K')
+byte('ÿ')
+[]byte("hello¿")
+[]byte("a")
+bool(true)
+string("hello\\xbd\\xb2=\\xbc ⌘")
+float64(-12.5)
+float32(2.5)`,
+ },
+ {
+ desc: "float edge cases",
+ // The two IEEE 754 bit patterns used for the math.Float{64,32}frombits
+ // encodings are non-math.NaN quiet-NaN values. Since they are not equal
+ // to math.NaN(), they should be re-encoded to their bit patterns. They
+ // are, respectively:
+ // * math.Float64bits(math.NaN())+1
+ // * math.Float32bits(float32(math.NaN()))+1
+ in: `go test fuzz v1
+float32(-0)
+float64(-0)
+float32(+Inf)
+float32(-Inf)
+float32(NaN)
+float64(+Inf)
+float64(-Inf)
+float64(NaN)
+math.Float64frombits(0x7ff8000000000002)
+math.Float32frombits(0x7fc00001)`,
+ },
+ {
+ desc: "int variations",
+ // Although we arbitrarily choose default integer bases (10 or 16), we may
+ // want to change those arbitrary choices in the future and should not
+ // break the parser. Verify that integers in the opposite bases still
+ // parse correctly.
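+ // For example, int(0x0) below must parse even though the encoder
+ // would re-emit it in decimal as int(0).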
+ in: `go test fuzz v1 +int(0x0) +int32(0x41) +int64(0xfffffffff) +uint32(0xcafef00d) +uint64(0xffffffffffffffff) +uint8(0b0000000) +byte(0x0) +byte('\000') +byte('\u0000') +byte('\'') +math.Float64frombits(9221120237041090562) +math.Float32frombits(2143289345)`, + want: `go test fuzz v1 +int(0) +rune('A') +int64(68719476735) +uint32(3405705229) +uint64(18446744073709551615) +byte('\x00') +byte('\x00') +byte('\x00') +byte('\x00') +byte('\'') +math.Float64frombits(0x7ff8000000000002) +math.Float32frombits(0x7fc00001)`, + }, + { + desc: "rune validation", + in: `go test fuzz v1 +rune(0) +rune(0x41) +rune(-1) +rune(0xfffd) +rune(0xd800) +rune(0x10ffff) +rune(0x110000) +`, + want: `go test fuzz v1 +rune('\x00') +rune('A') +int32(-1) +rune('�') +int32(55296) +rune('\U0010ffff') +int32(1114112)`, + }, + { + desc: "int overflow", + in: `go test fuzz v1 +int(0x7fffffffffffffff) +uint(0xffffffffffffffff)`, + want: func() string { + switch strconv.IntSize { + case 32: + return `go test fuzz v1 +int(-1) +uint(4294967295)` + case 64: + return `go test fuzz v1 +int(9223372036854775807) +uint(18446744073709551615)` + default: + panic("unreachable") + } + }(), + }, + { + desc: "windows new line", + in: "go test fuzz v1\r\nint(0)\r\n", + want: "go test fuzz v1\nint(0)", + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + vals, err := unmarshalCorpusFile([]byte(test.in)) + if test.reject { + if err == nil { + t.Fatalf("unmarshal unexpected success") + } + return + } + if err != nil { + t.Fatalf("unmarshal unexpected error: %v", err) + } + newB := marshalCorpusFile(vals...) + if newB[len(newB)-1] != '\n' { + t.Error("didn't write final newline to corpus file") + } + + want := test.want + if want == "" { + want = test.in + } + want += "\n" + got := string(newB) + if got != want { + t.Errorf("unexpected marshaled value\ngot:\n%s\nwant:\n%s", got, want) + } + }) + } +} + +// BenchmarkMarshalCorpusFile measures the time it takes to serialize byte +// slices of various sizes to a corpus file. The slice contains a repeating +// sequence of bytes 0-255 to mix escaped and non-escaped characters. +func BenchmarkMarshalCorpusFile(b *testing.B) { + buf := make([]byte, 1024*1024) + for i := 0; i < len(buf); i++ { + buf[i] = byte(i) + } + + for sz := 1; sz <= len(buf); sz <<= 1 { + sz := sz + b.Run(strconv.Itoa(sz), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.SetBytes(int64(sz)) + marshalCorpusFile(buf[:sz]) + } + }) + } +} + +// BenchmarkUnmarshalCorpusfile measures the time it takes to deserialize +// files encoding byte slices of various sizes. The slice contains a repeating +// sequence of bytes 0-255 to mix escaped and non-escaped characters. 
+func BenchmarkUnmarshalCorpusFile(b *testing.B) { + buf := make([]byte, 1024*1024) + for i := 0; i < len(buf); i++ { + buf[i] = byte(i) + } + + for sz := 1; sz <= len(buf); sz <<= 1 { + sz := sz + data := marshalCorpusFile(buf[:sz]) + b.Run(strconv.Itoa(sz), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.SetBytes(int64(sz)) + unmarshalCorpusFile(data) + } + }) + } +} + +func TestByteRoundTrip(t *testing.T) { + for x := 0; x < 256; x++ { + b1 := byte(x) + buf := marshalCorpusFile(b1) + vs, err := unmarshalCorpusFile(buf) + if err != nil { + t.Fatal(err) + } + b2 := vs[0].(byte) + if b2 != b1 { + t.Fatalf("unmarshaled %v, want %v:\n%s", b2, b1, buf) + } + } +} + +func TestInt8RoundTrip(t *testing.T) { + for x := -128; x < 128; x++ { + i1 := int8(x) + buf := marshalCorpusFile(i1) + vs, err := unmarshalCorpusFile(buf) + if err != nil { + t.Fatal(err) + } + i2 := vs[0].(int8) + if i2 != i1 { + t.Fatalf("unmarshaled %v, want %v:\n%s", i2, i1, buf) + } + } +} + +func FuzzFloat64RoundTrip(f *testing.F) { + f.Add(math.Float64bits(0)) + f.Add(math.Float64bits(math.Copysign(0, -1))) + f.Add(math.Float64bits(math.MaxFloat64)) + f.Add(math.Float64bits(math.SmallestNonzeroFloat64)) + f.Add(math.Float64bits(math.NaN())) + f.Add(uint64(0x7FF0000000000001)) // signaling NaN + f.Add(math.Float64bits(math.Inf(1))) + f.Add(math.Float64bits(math.Inf(-1))) + + f.Fuzz(func(t *testing.T, u1 uint64) { + x1 := math.Float64frombits(u1) + + b := marshalCorpusFile(x1) + t.Logf("marshaled math.Float64frombits(0x%x):\n%s", u1, b) + + xs, err := unmarshalCorpusFile(b) + if err != nil { + t.Fatal(err) + } + if len(xs) != 1 { + t.Fatalf("unmarshaled %d values", len(xs)) + } + x2 := xs[0].(float64) + u2 := math.Float64bits(x2) + if u2 != u1 { + t.Errorf("unmarshaled %v (bits 0x%x)", x2, u2) + } + }) +} + +func FuzzRuneRoundTrip(f *testing.F) { + f.Add(rune(-1)) + f.Add(rune(0xd800)) + f.Add(rune(0xdfff)) + f.Add(rune(unicode.ReplacementChar)) + f.Add(rune(unicode.MaxASCII)) + f.Add(rune(unicode.MaxLatin1)) + f.Add(rune(unicode.MaxRune)) + f.Add(rune(unicode.MaxRune + 1)) + f.Add(rune(-0x80000000)) + f.Add(rune(0x7fffffff)) + + f.Fuzz(func(t *testing.T, r1 rune) { + b := marshalCorpusFile(r1) + t.Logf("marshaled rune(0x%x):\n%s", r1, b) + + rs, err := unmarshalCorpusFile(b) + if err != nil { + t.Fatal(err) + } + if len(rs) != 1 { + t.Fatalf("unmarshaled %d values", len(rs)) + } + r2 := rs[0].(rune) + if r2 != r1 { + t.Errorf("unmarshaled rune(0x%x)", r2) + } + }) +} + +func FuzzStringRoundTrip(f *testing.F) { + f.Add("") + f.Add("\x00") + f.Add(string([]rune{unicode.ReplacementChar})) + + f.Fuzz(func(t *testing.T, s1 string) { + b := marshalCorpusFile(s1) + t.Logf("marshaled %q:\n%s", s1, b) + + rs, err := unmarshalCorpusFile(b) + if err != nil { + t.Fatal(err) + } + if len(rs) != 1 { + t.Fatalf("unmarshaled %d values", len(rs)) + } + s2 := rs[0].(string) + if s2 != s1 { + t.Errorf("unmarshaled %q", s2) + } + }) +} diff --git a/testing/internal/fuzz/fuzz.go b/testing/internal/fuzz/fuzz.go new file mode 100644 index 0000000..f4d43fa --- /dev/null +++ b/testing/internal/fuzz/fuzz.go @@ -0,0 +1,1103 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fuzz provides common fuzzing functionality for tests built with +// "go test" and for programs that use fuzzing functionality in the testing +// package. 
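+//
+// At a high level, a coordinator process feeds corpus entries to worker
+// processes, which execute and mutate them and report back crashes and
+// inputs that expand coverage.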
+package fuzz
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "io"
+ "math/bits"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebug"
+)
+
+// CoordinateFuzzingOpts is a set of arguments for CoordinateFuzzing.
+// The zero value is valid for each field unless specified otherwise.
+type CoordinateFuzzingOpts struct {
+ // Log is a writer for logging progress messages and warnings.
+ // If nil, io.Discard will be used instead.
+ Log io.Writer
+
+ // Timeout is the amount of wall clock time to spend fuzzing after the corpus
+ // has loaded. If zero, there will be no time limit.
+ Timeout time.Duration
+
+ // Limit is the number of random values to generate and test. If zero,
+ // there will be no limit on the number of generated values.
+ Limit int64
+
+ // MinimizeTimeout is the amount of wall clock time to spend minimizing
+ // after discovering a crasher. If zero, there will be no time limit. If
+ // MinimizeTimeout and MinimizeLimit are both zero, then minimization will
+ // be disabled.
+ MinimizeTimeout time.Duration
+
+ // MinimizeLimit is the maximum number of calls to the fuzz function to be
+ // made while minimizing after finding a crash. If zero, there will be no
+ // limit. Calls to the fuzz function made when minimizing also count toward
+ // Limit. If MinimizeTimeout and MinimizeLimit are both zero, then
+ // minimization will be disabled.
+ MinimizeLimit int64
+
+ // Parallel is the number of worker processes to run in parallel. If zero,
+ // CoordinateFuzzing will run GOMAXPROCS workers.
+ Parallel int
+
+ // Seed is a list of seed values added by the fuzz target with testing.F.Add
+ // and in testdata.
+ Seed []CorpusEntry
+
+ // Types is the list of types which make up a corpus entry.
+ // Types must be set and must match values in Seed.
+ Types []reflect.Type
+
+ // CorpusDir is a directory where files containing values that crash the
+ // code being tested may be written. CorpusDir must be set.
+ CorpusDir string
+
+ // CacheDir is a directory containing additional "interesting" values.
+ // The fuzzer may derive new values from these, and may write new values here.
+ CacheDir string
+}
+
+// CoordinateFuzzing creates several worker processes and communicates with
+// them to test random inputs that could trigger crashes and expose bugs.
+// The worker processes run the same binary in the same directory with the
+// same environment variables as the coordinator process. Workers also run
+// with the same arguments as the coordinator, except with the -test.fuzzworker
+// flag prepended to the argument list.
+//
+// If a crash occurs, the function will return an error containing information
+// about the crash, which can be reported to the user.
+func CoordinateFuzzing(ctx context.Context, opts CoordinateFuzzingOpts) (err error) {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ if opts.Log == nil {
+ opts.Log = io.Discard
+ }
+ if opts.Parallel == 0 {
+ opts.Parallel = runtime.GOMAXPROCS(0)
+ }
+ if opts.Limit > 0 && int64(opts.Parallel) > opts.Limit {
+ // Don't start more workers than we need.
+ opts.Parallel = int(opts.Limit)
+ }
+
+ c, err := newCoordinator(opts)
+ if err != nil {
+ return err
+ }
+
+ if opts.Timeout > 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, opts.Timeout)
+ defer cancel()
+ }
+
+ // fuzzCtx is used to stop workers, for example, after finding a crasher.
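+ // It is canceled by stop below, or by the deferred cancelWorkers if
+ // CoordinateFuzzing returns early.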
+ fuzzCtx, cancelWorkers := context.WithCancel(ctx) + defer cancelWorkers() + doneC := ctx.Done() + + // stop is called when a worker encounters a fatal error. + var fuzzErr error + stopping := false + stop := func(err error) { + if shouldPrintDebugInfo() { + _, file, line, ok := runtime.Caller(1) + if ok { + c.debugLogf("stop called at %s:%d. stopping: %t", file, line, stopping) + } else { + c.debugLogf("stop called at unknown. stopping: %t", stopping) + } + } + + if err == fuzzCtx.Err() || isInterruptError(err) { + // Suppress cancellation errors and terminations due to SIGINT. + // The messages are not helpful since either the user triggered the error + // (with ^C) or another more helpful message will be printed (a crasher). + err = nil + } + if err != nil && (fuzzErr == nil || fuzzErr == ctx.Err()) { + fuzzErr = err + } + if stopping { + return + } + stopping = true + cancelWorkers() + doneC = nil + } + + // Ensure that any crash we find is written to the corpus, even if an error + // or interruption occurs while minimizing it. + crashWritten := false + defer func() { + if c.crashMinimizing == nil || crashWritten { + return + } + werr := writeToCorpus(&c.crashMinimizing.entry, opts.CorpusDir) + if werr != nil { + err = fmt.Errorf("%w\n%v", err, werr) + return + } + if err == nil { + err = &crashError{ + path: c.crashMinimizing.entry.Path, + err: errors.New(c.crashMinimizing.crasherMsg), + } + } + }() + + // Start workers. + // TODO(jayconrod): do we want to support fuzzing different binaries? + dir := "" // same as self + binPath := os.Args[0] + args := append([]string{"-test.fuzzworker"}, os.Args[1:]...) + env := os.Environ() // same as self + + errC := make(chan error) + workers := make([]*worker, opts.Parallel) + for i := range workers { + var err error + workers[i], err = newWorker(c, dir, binPath, args, env) + if err != nil { + return err + } + } + for i := range workers { + w := workers[i] + go func() { + err := w.coordinate(fuzzCtx) + if fuzzCtx.Err() != nil || isInterruptError(err) { + err = nil + } + cleanErr := w.cleanup() + if err == nil { + err = cleanErr + } + errC <- err + }() + } + + // Main event loop. + // Do not return until all workers have terminated. We avoid a deadlock by + // receiving messages from workers even after ctx is canceled. + activeWorkers := len(workers) + statTicker := time.NewTicker(3 * time.Second) + defer statTicker.Stop() + defer c.logStats() + + c.logStats() + for { + // If there is an execution limit, and we've reached it, stop. + if c.opts.Limit > 0 && c.count >= c.opts.Limit { + stop(nil) + } + + var inputC chan fuzzInput + input, ok := c.peekInput() + if ok && c.crashMinimizing == nil && !stopping { + inputC = c.inputC + } + + var minimizeC chan fuzzMinimizeInput + minimizeInput, ok := c.peekMinimizeInput() + if ok && !stopping { + minimizeC = c.minimizeC + } + + select { + case <-doneC: + // Interrupted, canceled, or timed out. + // stop sets doneC to nil, so we don't busy wait here. + stop(ctx.Err()) + + case err := <-errC: + // A worker terminated, possibly after encountering a fatal error. + stop(err) + activeWorkers-- + if activeWorkers == 0 { + return fuzzErr + } + + case result := <-c.resultC: + // Received response from worker. 
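+ // A result is either a crasher, an input with new coverage data, or a
+ // plain warmup execution; each case is handled in turn below.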
+ if stopping { + break + } + c.updateStats(result) + + if result.crasherMsg != "" { + if c.warmupRun() && result.entry.IsSeed { + target := filepath.Base(c.opts.CorpusDir) + fmt.Fprintf(c.opts.Log, "failure while testing seed corpus entry: %s/%s\n", target, testName(result.entry.Parent)) + stop(errors.New(result.crasherMsg)) + break + } + if c.canMinimize() && result.canMinimize { + if c.crashMinimizing != nil { + // This crash is not minimized, and another crash is being minimized. + // Ignore this one and wait for the other one to finish. + if shouldPrintDebugInfo() { + c.debugLogf("found unminimized crasher, skipping in favor of minimizable crasher") + } + break + } + // Found a crasher but haven't yet attempted to minimize it. + // Send it back to a worker for minimization. Disable inputC so + // other workers don't continue fuzzing. + c.crashMinimizing = &result + fmt.Fprintf(c.opts.Log, "fuzz: minimizing %d-byte failing input file\n", len(result.entry.Data)) + c.queueForMinimization(result, nil) + } else if !crashWritten { + // Found a crasher that's either minimized or not minimizable. + // Write to corpus and stop. + err := writeToCorpus(&result.entry, opts.CorpusDir) + if err == nil { + crashWritten = true + err = &crashError{ + path: result.entry.Path, + err: errors.New(result.crasherMsg), + } + } + if shouldPrintDebugInfo() { + c.debugLogf( + "found crasher, id: %s, parent: %s, gen: %d, size: %d, exec time: %s", + result.entry.Path, + result.entry.Parent, + result.entry.Generation, + len(result.entry.Data), + result.entryDuration, + ) + } + stop(err) + } + } else if result.coverageData != nil { + if c.warmupRun() { + if shouldPrintDebugInfo() { + c.debugLogf( + "processed an initial input, id: %s, new bits: %d, size: %d, exec time: %s", + result.entry.Parent, + countBits(diffCoverage(c.coverageMask, result.coverageData)), + len(result.entry.Data), + result.entryDuration, + ) + } + c.updateCoverage(result.coverageData) + c.warmupInputLeft-- + if c.warmupInputLeft == 0 { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel) + if shouldPrintDebugInfo() { + c.debugLogf( + "finished processing input corpus, entries: %d, initial coverage bits: %d", + len(c.corpus.entries), + countBits(c.coverageMask), + ) + } + } + } else if keepCoverage := diffCoverage(c.coverageMask, result.coverageData); keepCoverage != nil { + // Found a value that expanded coverage. + // It's not a crasher, but we may want to add it to the on-disk + // corpus and prioritize it for future fuzzing. + // TODO(jayconrod, katiehockman): Prioritize fuzzing these + // values which expanded coverage, perhaps based on the + // number of new edges that this result expanded. + // TODO(jayconrod, katiehockman): Don't write a value that's already + // in the corpus. + if c.canMinimize() && result.canMinimize && c.crashMinimizing == nil { + // Send back to workers to find a smaller value that preserves + // at least one new coverage bit. + c.queueForMinimization(result, keepCoverage) + } else { + // Update the coordinator's coverage mask and save the value. 
+ inputSize := len(result.entry.Data) + entryNew, err := c.addCorpusEntries(true, result.entry) + if err != nil { + stop(err) + break + } + if !entryNew { + if shouldPrintDebugInfo() { + c.debugLogf( + "ignoring duplicate input which increased coverage, id: %s", + result.entry.Path, + ) + } + break + } + c.updateCoverage(keepCoverage) + c.inputQueue.enqueue(result.entry) + c.interestingCount++ + if shouldPrintDebugInfo() { + c.debugLogf( + "new interesting input, id: %s, parent: %s, gen: %d, new bits: %d, total bits: %d, size: %d, exec time: %s", + result.entry.Path, + result.entry.Parent, + result.entry.Generation, + countBits(keepCoverage), + countBits(c.coverageMask), + inputSize, + result.entryDuration, + ) + } + } + } else { + if shouldPrintDebugInfo() { + c.debugLogf( + "worker reported interesting input that doesn't expand coverage, id: %s, parent: %s, canMinimize: %t", + result.entry.Path, + result.entry.Parent, + result.canMinimize, + ) + } + } + } else if c.warmupRun() { + // No error or coverage data was reported for this input during + // warmup, so continue processing results. + c.warmupInputLeft-- + if c.warmupInputLeft == 0 { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed, now fuzzing with %d workers\n", c.elapsed(), c.warmupInputCount, c.warmupInputCount, c.opts.Parallel) + if shouldPrintDebugInfo() { + c.debugLogf( + "finished testing-only phase, entries: %d", + len(c.corpus.entries), + ) + } + } + } + + case inputC <- input: + // Sent the next input to a worker. + c.sentInput(input) + + case minimizeC <- minimizeInput: + // Sent the next input for minimization to a worker. + c.sentMinimizeInput(minimizeInput) + + case <-statTicker.C: + c.logStats() + } + } + + // TODO(jayconrod,katiehockman): if a crasher can't be written to the corpus, + // write to the cache instead. +} + +// crashError wraps a crasher written to the seed corpus. It saves the name +// of the file where the input causing the crasher was saved. The testing +// framework uses this to report a command to re-run that specific input. +type crashError struct { + path string + err error +} + +func (e *crashError) Error() string { + return e.err.Error() +} + +func (e *crashError) Unwrap() error { + return e.err +} + +func (e *crashError) CrashPath() string { + return e.path +} + +type corpus struct { + entries []CorpusEntry + hashes map[[sha256.Size]byte]bool +} + +// addCorpusEntries adds entries to the corpus, and optionally writes the entries +// to the cache directory. If an entry is already in the corpus it is skipped. If +// all of the entries are unique, addCorpusEntries returns true and a nil error, +// if at least one of the entries was a duplicate, it returns false and a nil error. +func (c *coordinator) addCorpusEntries(addToCache bool, entries ...CorpusEntry) (bool, error) { + noDupes := true + for _, e := range entries { + data, err := corpusEntryData(e) + if err != nil { + return false, err + } + h := sha256.Sum256(data) + if c.corpus.hashes[h] { + noDupes = false + continue + } + if addToCache { + if err := writeToCorpus(&e, c.opts.CacheDir); err != nil { + return false, err + } + // For entries written to disk, we don't hold onto the bytes, + // since the corpus would consume a significant amount of + // memory. + e.Data = nil + } + c.corpus.hashes[h] = true + c.corpus.entries = append(c.corpus.entries, e) + } + return noDupes, nil +} + +// CorpusEntry represents an individual input for fuzzing. 
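+// An entry may come from the seed corpus (f.Add or files in testdata) or from
+// the on-disk cache of generated inputs.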
+//
+// We must use an equivalent type in the testing and testing/internal/testdeps
+// packages, but testing can't import this package directly, and we don't want
+// to export this type from testing. Instead, we use the same struct type and
+// use a type alias (not a defined type) for convenience.
+type CorpusEntry = struct {
+ Parent string
+
+ // Path is the path of the corpus file, if the entry was loaded from disk.
+ // For other entries, including seed values provided by f.Add, Path is the
+ // name of the test, e.g. seed#0 or its hash.
+ Path string
+
+ // Data is the raw input data. Data should only be populated for seed
+ // values. For on-disk corpus files, Data will be nil, as it will be loaded
+ // from disk using Path.
+ Data []byte
+
+ // Values is the unmarshaled values from a corpus file.
+ Values []any
+
+ Generation int
+
+ // IsSeed indicates whether this entry is part of the seed corpus.
+ IsSeed bool
+}
+
+// corpusEntryData returns the raw input bytes, either from the data struct
+// field, or from disk.
+func corpusEntryData(ce CorpusEntry) ([]byte, error) {
+ if ce.Data != nil {
+ return ce.Data, nil
+ }
+
+ return os.ReadFile(ce.Path)
+}
+
+type fuzzInput struct {
+ // entry is the value to test initially. The worker will randomly mutate
+ // values from this starting point.
+ entry CorpusEntry
+
+ // timeout is the time to spend fuzzing variations of this input,
+ // not including starting or cleaning up.
+ timeout time.Duration
+
+ // limit is the maximum number of calls to the fuzz function the worker may
+ // make. The worker may make fewer calls, for example, if it finds an
+ // error early. If limit is zero, there is no limit on calls to the
+ // fuzz function.
+ limit int64
+
+ // warmup indicates whether this is a warmup input before fuzzing begins. If
+ // true, the input should not be fuzzed.
+ warmup bool
+
+ // coverageData reflects the coordinator's current coverageMask.
+ coverageData []byte
+}
+
+type fuzzResult struct {
+ // entry is an interesting value or a crasher.
+ entry CorpusEntry
+
+ // crasherMsg is an error message from a crash. It's "" if no crash was found.
+ crasherMsg string
+
+ // canMinimize is true if the worker should attempt to minimize this result.
+ // It may be false because an attempt has already been made.
+ canMinimize bool
+
+ // coverageData is set if the worker found new coverage.
+ coverageData []byte
+
+ // limit is the number of values the coordinator asked the worker
+ // to test. 0 if there was no limit.
+ limit int64
+
+ // count is the number of values the worker actually tested.
+ count int64
+
+ // totalDuration is the time the worker spent testing inputs.
+ totalDuration time.Duration
+
+ // entryDuration is the time the worker spent executing an interesting result.
+ entryDuration time.Duration
+}
+
+type fuzzMinimizeInput struct {
+ // entry is an interesting value or crasher to minimize.
+ entry CorpusEntry
+
+ // crasherMsg is an error message from a crash. It's "" if no crash was found.
+ // If set, the worker will attempt to find a smaller input that also produces
+ // an error, though not necessarily the same error.
+ crasherMsg string
+
+ // limit is the maximum number of calls to the fuzz function the worker may
+ // make. The worker may make fewer calls, for example, if it can't reproduce
+ // an error. If limit is zero, there is no limit on calls to the fuzz function.
+ limit int64
+
+ // timeout is the time to spend minimizing this input.
+ // A zero timeout means no limit.
+ timeout time.Duration
+
+ // keepCoverage is a set of coverage bits that entry found that were not in
+ // the coordinator's combined set. When minimizing, the worker should find an
+ // input that preserves at least one of these bits. keepCoverage is nil for
+ // crashing inputs.
+ keepCoverage []byte
+}
+
+// coordinator holds channels that workers can use to communicate with
+// the coordinator.
+type coordinator struct {
+ opts CoordinateFuzzingOpts
+
+ // startTime is the time we started the workers after loading the corpus.
+ // Used for logging.
+ startTime time.Time
+
+ // inputC is sent values to fuzz by the coordinator. Any worker may receive
+ // values from this channel. Workers send results to resultC.
+ inputC chan fuzzInput
+
+ // minimizeC is sent values to minimize by the coordinator. Any worker may
+ // receive values from this channel. Workers send results to resultC.
+ minimizeC chan fuzzMinimizeInput
+
+ // resultC is sent results of fuzzing by workers. The coordinator
+ // receives these. Multiple types of messages are allowed.
+ resultC chan fuzzResult
+
+ // count is the number of values fuzzed so far.
+ count int64
+
+ // countLastLog is the number of values fuzzed when the output was last
+ // logged.
+ countLastLog int64
+
+ // timeLastLog is the time at which the output was last logged.
+ timeLastLog time.Time
+
+ // interestingCount is the number of unique interesting values which have
+ // been found this execution.
+ interestingCount int
+
+ // warmupInputCount is the count of all entries in the corpus which will
+ // need to be received from workers to run once during warmup, but not fuzz.
+ // This could be for coverage data, or only for the purposes of verifying
+ // that the seed corpus doesn't have any crashers. See warmupRun.
+ warmupInputCount int
+
+ // warmupInputLeft is the number of entries in the corpus which still need
+ // to be received from workers to run once during warmup, but not fuzz.
+ // See warmupInputCount.
+ warmupInputLeft int
+
+ // duration is the time spent fuzzing inside workers, not counting time
+ // starting up or tearing down.
+ duration time.Duration
+
+ // countWaiting is the number of fuzzing executions the coordinator is
+ // waiting on workers to complete.
+ countWaiting int64
+
+ // corpus is a set of interesting values, including the seed corpus and
+ // generated values that workers reported as interesting.
+ corpus corpus
+
+ // minimizationAllowed is true if one or more of the types of fuzz
+ // function's parameters can be minimized.
+ minimizationAllowed bool
+
+ // inputQueue is a queue of inputs that workers should try fuzzing. This is
+ // initially populated from the seed corpus and cached inputs. More inputs
+ // may be added as new coverage is discovered.
+ inputQueue queue
+
+ // minimizeQueue is a queue of inputs that caused errors or exposed new
+ // coverage. Workers should attempt to find smaller inputs that do the
+ // same thing.
+ minimizeQueue queue
+
+ // crashMinimizing is the crash that is currently being minimized.
+ crashMinimizing *fuzzResult
+
+ // coverageMask aggregates coverage that was found for all inputs in the
+ // corpus. Each byte represents a single basic execution block. Each set bit
+ // within the byte indicates that an input has triggered that block at least
+ // 1 << n times, where n is the position of the bit in the byte. For example, a
+ // value of 12 indicates that separate inputs have triggered this block
+ // between 4-7 times and 8-15 times.
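+ // (12 is 0b00001100: bits 2 and 3 are set, so one input hit the block
+ // between 1<<2 and (1<<3)-1 times and another between 1<<3 and (1<<4)-1.)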
+ coverageMask []byte +} + +func newCoordinator(opts CoordinateFuzzingOpts) (*coordinator, error) { + // Make sure all the seed corpus has marshaled data. + for i := range opts.Seed { + if opts.Seed[i].Data == nil && opts.Seed[i].Values != nil { + opts.Seed[i].Data = marshalCorpusFile(opts.Seed[i].Values...) + } + } + c := &coordinator{ + opts: opts, + startTime: time.Now(), + inputC: make(chan fuzzInput), + minimizeC: make(chan fuzzMinimizeInput), + resultC: make(chan fuzzResult), + timeLastLog: time.Now(), + corpus: corpus{hashes: make(map[[sha256.Size]byte]bool)}, + } + if err := c.readCache(); err != nil { + return nil, err + } + if opts.MinimizeLimit > 0 || opts.MinimizeTimeout > 0 { + for _, t := range opts.Types { + if isMinimizable(t) { + c.minimizationAllowed = true + break + } + } + } + + covSize := len(coverage()) + if covSize == 0 { + fmt.Fprintf(c.opts.Log, "warning: the test binary was not built with coverage instrumentation, so fuzzing will run without coverage guidance and may be inefficient\n") + // Even though a coverage-only run won't occur, we should still run all + // of the seed corpus to make sure there are no existing failures before + // we start fuzzing. + c.warmupInputCount = len(c.opts.Seed) + for _, e := range c.opts.Seed { + c.inputQueue.enqueue(e) + } + } else { + c.warmupInputCount = len(c.corpus.entries) + for _, e := range c.corpus.entries { + c.inputQueue.enqueue(e) + } + // Set c.coverageMask to a clean []byte full of zeros. + c.coverageMask = make([]byte, covSize) + } + c.warmupInputLeft = c.warmupInputCount + + if len(c.corpus.entries) == 0 { + fmt.Fprintf(c.opts.Log, "warning: starting with empty corpus\n") + var vals []any + for _, t := range opts.Types { + vals = append(vals, zeroValue(t)) + } + data := marshalCorpusFile(vals...) + h := sha256.Sum256(data) + name := fmt.Sprintf("%x", h[:4]) + c.addCorpusEntries(false, CorpusEntry{Path: name, Data: data}) + } + + return c, nil +} + +func (c *coordinator) updateStats(result fuzzResult) { + c.count += result.count + c.countWaiting -= result.limit + c.duration += result.totalDuration +} + +func (c *coordinator) logStats() { + now := time.Now() + if c.warmupRun() { + runSoFar := c.warmupInputCount - c.warmupInputLeft + if coverageEnabled { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, gathering baseline coverage: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount) + } else { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, testing seed corpus: %d/%d completed\n", c.elapsed(), runSoFar, c.warmupInputCount) + } + } else if c.crashMinimizing != nil { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, minimizing\n", c.elapsed()) + } else { + rate := float64(c.count-c.countLastLog) / now.Sub(c.timeLastLog).Seconds() + if coverageEnabled { + total := c.warmupInputCount + c.interestingCount + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec), new interesting: %d (total: %d)\n", c.elapsed(), c.count, rate, c.interestingCount, total) + } else { + fmt.Fprintf(c.opts.Log, "fuzz: elapsed: %s, execs: %d (%.0f/sec)\n", c.elapsed(), c.count, rate) + } + } + c.countLastLog = c.count + c.timeLastLog = now +} + +// peekInput returns the next value that should be sent to workers. +// If the number of executions is limited, the returned value includes +// a limit for one worker. If there are no executions left, peekInput returns +// a zero value and false. +// +// peekInput doesn't actually remove the input from the queue. The caller +// must call sentInput after sending the input. 
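+// This peek-then-commit split lets the coordinator's select statement offer
+// an input on inputC without removing it if another case fires first.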
+// +// If the input queue is empty and the coverage/testing-only run has completed, +// queue refills it from the corpus. +func (c *coordinator) peekInput() (fuzzInput, bool) { + if c.opts.Limit > 0 && c.count+c.countWaiting >= c.opts.Limit { + // Already making the maximum number of calls to the fuzz function. + // Don't send more inputs right now. + return fuzzInput{}, false + } + if c.inputQueue.len == 0 { + if c.warmupRun() { + // Wait for coverage/testing-only run to finish before sending more + // inputs. + return fuzzInput{}, false + } + c.refillInputQueue() + } + + entry, ok := c.inputQueue.peek() + if !ok { + panic("input queue empty after refill") + } + input := fuzzInput{ + entry: entry.(CorpusEntry), + timeout: workerFuzzDuration, + warmup: c.warmupRun(), + } + if c.coverageMask != nil { + input.coverageData = bytes.Clone(c.coverageMask) + } + if input.warmup { + // No fuzzing will occur, but it should count toward the limit set by + // -fuzztime. + input.limit = 1 + return input, true + } + + if c.opts.Limit > 0 { + input.limit = c.opts.Limit / int64(c.opts.Parallel) + if c.opts.Limit%int64(c.opts.Parallel) > 0 { + input.limit++ + } + remaining := c.opts.Limit - c.count - c.countWaiting + if input.limit > remaining { + input.limit = remaining + } + } + return input, true +} + +// sentInput updates internal counters after an input is sent to c.inputC. +func (c *coordinator) sentInput(input fuzzInput) { + c.inputQueue.dequeue() + c.countWaiting += input.limit +} + +// refillInputQueue refills the input queue from the corpus after it becomes +// empty. +func (c *coordinator) refillInputQueue() { + for _, e := range c.corpus.entries { + c.inputQueue.enqueue(e) + } +} + +// queueForMinimization creates a fuzzMinimizeInput from result and adds it +// to the minimization queue to be sent to workers. +func (c *coordinator) queueForMinimization(result fuzzResult, keepCoverage []byte) { + if shouldPrintDebugInfo() { + c.debugLogf( + "queueing input for minimization, id: %s, parent: %s, keepCoverage: %t, crasher: %t", + result.entry.Path, + result.entry.Parent, + keepCoverage != nil, + result.crasherMsg != "", + ) + } + if result.crasherMsg != "" { + c.minimizeQueue.clear() + } + + input := fuzzMinimizeInput{ + entry: result.entry, + crasherMsg: result.crasherMsg, + keepCoverage: keepCoverage, + } + c.minimizeQueue.enqueue(input) +} + +// peekMinimizeInput returns the next input that should be sent to workers for +// minimization. +func (c *coordinator) peekMinimizeInput() (fuzzMinimizeInput, bool) { + if !c.canMinimize() { + // Already making the maximum number of calls to the fuzz function. + // Don't send more inputs right now. + return fuzzMinimizeInput{}, false + } + v, ok := c.minimizeQueue.peek() + if !ok { + return fuzzMinimizeInput{}, false + } + input := v.(fuzzMinimizeInput) + + if c.opts.MinimizeTimeout > 0 { + input.timeout = c.opts.MinimizeTimeout + } + if c.opts.MinimizeLimit > 0 { + input.limit = c.opts.MinimizeLimit + } else if c.opts.Limit > 0 { + if input.crasherMsg != "" { + input.limit = c.opts.Limit + } else { + input.limit = c.opts.Limit / int64(c.opts.Parallel) + if c.opts.Limit%int64(c.opts.Parallel) > 0 { + input.limit++ + } + } + } + if c.opts.Limit > 0 { + remaining := c.opts.Limit - c.count - c.countWaiting + if input.limit > remaining { + input.limit = remaining + } + } + return input, true +} + +// sentMinimizeInput removes an input from the minimization queue after it's +// sent to minimizeC. 
+func (c *coordinator) sentMinimizeInput(input fuzzMinimizeInput) { + c.minimizeQueue.dequeue() + c.countWaiting += input.limit +} + +// warmupRun returns true while the coordinator is running inputs without +// mutating them as a warmup before fuzzing. This could be to gather baseline +// coverage data for entries in the corpus, or to test all of the seed corpus +// for errors before fuzzing begins. +// +// The coordinator doesn't store coverage data in the cache with each input +// because that data would be invalid when counter offsets in the test binary +// change. +// +// When gathering coverage, the coordinator sends each entry to a worker to +// gather coverage for that entry only, without fuzzing or minimizing. This +// phase ends when all workers have finished, and the coordinator has a combined +// coverage map. +func (c *coordinator) warmupRun() bool { + return c.warmupInputLeft > 0 +} + +// updateCoverage sets bits in c.coverageMask that are set in newCoverage. +// updateCoverage returns the number of newly set bits. See the comment on +// coverageMask for the format. +func (c *coordinator) updateCoverage(newCoverage []byte) int { + if len(newCoverage) != len(c.coverageMask) { + panic(fmt.Sprintf("number of coverage counters changed at runtime: %d, expected %d", len(newCoverage), len(c.coverageMask))) + } + newBitCount := 0 + for i := range newCoverage { + diff := newCoverage[i] &^ c.coverageMask[i] + newBitCount += bits.OnesCount8(diff) + c.coverageMask[i] |= newCoverage[i] + } + return newBitCount +} + +// canMinimize returns whether the coordinator should attempt to find smaller +// inputs that reproduce a crash or new coverage. +func (c *coordinator) canMinimize() bool { + return c.minimizationAllowed && + (c.opts.Limit == 0 || c.count+c.countWaiting < c.opts.Limit) +} + +func (c *coordinator) elapsed() time.Duration { + return time.Since(c.startTime).Round(1 * time.Second) +} + +// readCache creates a combined corpus from seed values and values in the cache +// (in GOCACHE/fuzz). +// +// TODO(fuzzing): need a mechanism that can remove values that +// aren't useful anymore, for example, because they have the wrong type. +func (c *coordinator) readCache() error { + if _, err := c.addCorpusEntries(false, c.opts.Seed...); err != nil { + return err + } + entries, err := ReadCorpus(c.opts.CacheDir, c.opts.Types) + if err != nil { + if _, ok := err.(*MalformedCorpusError); !ok { + // It's okay if some files in the cache directory are malformed and + // are not included in the corpus, but fail if it's an I/O error. + return err + } + // TODO(jayconrod,katiehockman): consider printing some kind of warning + // indicating the number of files which were skipped because they are + // malformed. + } + if _, err := c.addCorpusEntries(false, entries...); err != nil { + return err + } + return nil +} + +// MalformedCorpusError is an error found while reading the corpus from the +// filesystem. All of the errors are stored in the errs list. The testing +// framework uses this to report malformed files in testdata. +type MalformedCorpusError struct { + errs []error +} + +func (e *MalformedCorpusError) Error() string { + var msgs []string + for _, s := range e.errs { + msgs = append(msgs, s.Error()) + } + return strings.Join(msgs, "\n") +} + +// ReadCorpus reads the corpus from the provided dir. The returned corpus +// entries are guaranteed to match the given types. Any malformed files will +// be saved in a MalformedCorpusError and returned, along with the most recent +// error. 
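+// Each corpus file is expected to be in the "go test fuzz v1" text format
+// produced by marshalCorpusFile.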
+func ReadCorpus(dir string, types []reflect.Type) ([]CorpusEntry, error) { + files, err := os.ReadDir(dir) + if os.IsNotExist(err) { + return nil, nil // No corpus to read + } else if err != nil { + return nil, fmt.Errorf("reading seed corpus from testdata: %v", err) + } + var corpus []CorpusEntry + var errs []error + for _, file := range files { + // TODO(jayconrod,katiehockman): determine when a file is a fuzzing input + // based on its name. We should only read files created by writeToCorpus. + // If we read ALL files, we won't be able to change the file format by + // changing the extension. We also won't be able to add files like + // README.txt explaining why the directory exists. + if file.IsDir() { + continue + } + filename := filepath.Join(dir, file.Name()) + data, err := os.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read corpus file: %v", err) + } + var vals []any + vals, err = readCorpusData(data, types) + if err != nil { + errs = append(errs, fmt.Errorf("%q: %v", filename, err)) + continue + } + corpus = append(corpus, CorpusEntry{Path: filename, Values: vals}) + } + if len(errs) > 0 { + return corpus, &MalformedCorpusError{errs: errs} + } + return corpus, nil +} + +func readCorpusData(data []byte, types []reflect.Type) ([]any, error) { + vals, err := unmarshalCorpusFile(data) + if err != nil { + return nil, fmt.Errorf("unmarshal: %v", err) + } + if err = CheckCorpus(vals, types); err != nil { + return nil, err + } + return vals, nil +} + +// CheckCorpus verifies that the types in vals match the expected types +// provided. +func CheckCorpus(vals []any, types []reflect.Type) error { + if len(vals) != len(types) { + return fmt.Errorf("wrong number of values in corpus entry: %d, want %d", len(vals), len(types)) + } + valsT := make([]reflect.Type, len(vals)) + for valsI, v := range vals { + valsT[valsI] = reflect.TypeOf(v) + } + for i := range types { + if valsT[i] != types[i] { + return fmt.Errorf("mismatched types in corpus entry: %v, want %v", valsT, types) + } + } + return nil +} + +// writeToCorpus atomically writes the given bytes to a new file in testdata. If +// the directory does not exist, it will create one. If the file already exists, +// writeToCorpus will not rewrite it. writeToCorpus sets entry.Path to the new +// file that was just written or an error if it failed. +func writeToCorpus(entry *CorpusEntry, dir string) (err error) { + sum := fmt.Sprintf("%x", sha256.Sum256(entry.Data))[:16] + entry.Path = filepath.Join(dir, sum) + if err := os.MkdirAll(dir, 0777); err != nil { + return err + } + if err := os.WriteFile(entry.Path, entry.Data, 0666); err != nil { + os.Remove(entry.Path) // remove partially written file + return err + } + return nil +} + +func testName(path string) string { + return filepath.Base(path) +} + +func zeroValue(t reflect.Type) any { + for _, v := range zeroVals { + if reflect.TypeOf(v) == t { + return v + } + } + panic(fmt.Sprintf("unsupported type: %v", t)) +} + +var zeroVals []any = []any{ + []byte(""), + string(""), + false, + byte(0), + rune(0), + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), +} + +var debugInfo = godebug.New("#fuzzdebug").Value() == "1" + +func shouldPrintDebugInfo() bool { + return debugInfo +} + +func (c *coordinator) debugLogf(format string, args ...any) { + t := time.Now().Format("2006-01-02 15:04:05.999999999") + fmt.Fprintf(c.opts.Log, t+" DEBUG "+format+"\n", args...) 
+} diff --git a/testing/internal/fuzz/mem.go b/testing/internal/fuzz/mem.go new file mode 100644 index 0000000..4155e4e --- /dev/null +++ b/testing/internal/fuzz/mem.go @@ -0,0 +1,138 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "bytes" + "fmt" + "os" + "unsafe" +) + +// sharedMem manages access to a region of virtual memory mapped from a file, +// shared between multiple processes. The region includes space for a header and +// a value of variable length. +// +// When fuzzing, the coordinator creates a sharedMem from a temporary file for +// each worker. This buffer is used to pass values to fuzz between processes. +// Care must be taken to manage access to shared memory across processes; +// sharedMem provides no synchronization on its own. See workerComm for an +// explanation. +type sharedMem struct { + // f is the file mapped into memory. + f *os.File + + // region is the mapped region of virtual memory for f. The content of f may + // be read or written through this slice. + region []byte + + // removeOnClose is true if the file should be deleted by Close. + removeOnClose bool + + // sys contains OS-specific information. + sys sharedMemSys +} + +// sharedMemHeader stores metadata in shared memory. +type sharedMemHeader struct { + // count is the number of times the worker has called the fuzz function. + // May be reset by coordinator. + count int64 + + // valueLen is the number of bytes in region which should be read. + valueLen int + + // randState and randInc hold the state of a pseudo-random number generator. + randState, randInc uint64 + + // rawInMem is true if the region holds raw bytes, which occurs during + // minimization. If true after the worker fails during minimization, this + // indicates that an unrecoverable error occurred, and the region can be + // used to retrieve the raw bytes that caused the error. + rawInMem bool +} + +// sharedMemSize returns the size needed for a shared memory buffer that can +// contain values of the given size. +func sharedMemSize(valueSize int) int { + // TODO(jayconrod): set a reasonable maximum size per platform. + return int(unsafe.Sizeof(sharedMemHeader{})) + valueSize +} + +// sharedMemTempFile creates a new temporary file of the given size, then maps +// it into memory. The file will be removed when the Close method is called. +func sharedMemTempFile(size int) (m *sharedMem, err error) { + // Create a temporary file. + f, err := os.CreateTemp("", "fuzz-*") + if err != nil { + return nil, err + } + defer func() { + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + // Resize it to the correct size. + totalSize := sharedMemSize(size) + if err := f.Truncate(int64(totalSize)); err != nil { + return nil, err + } + + // Map the file into memory. + removeOnClose := true + return sharedMemMapFile(f, totalSize, removeOnClose) +} + +// header returns a pointer to metadata within the shared memory region. +func (m *sharedMem) header() *sharedMemHeader { + return (*sharedMemHeader)(unsafe.Pointer(&m.region[0])) +} + +// valueRef returns the value currently stored in shared memory. The returned +// slice points to shared memory; it is not a copy. 
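+// The value is laid out immediately after sharedMemHeader in the mapped
+// region, at offset unsafe.Sizeof(sharedMemHeader{}).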
+func (m *sharedMem) valueRef() []byte { + length := m.header().valueLen + valueOffset := int(unsafe.Sizeof(sharedMemHeader{})) + return m.region[valueOffset : valueOffset+length] +} + +// valueCopy returns a copy of the value stored in shared memory. +func (m *sharedMem) valueCopy() []byte { + ref := m.valueRef() + return bytes.Clone(ref) +} + +// setValue copies the data in b into the shared memory buffer and sets +// the length. len(b) must be less than or equal to the capacity of the buffer +// (as returned by cap(m.value())). +func (m *sharedMem) setValue(b []byte) { + v := m.valueRef() + if len(b) > cap(v) { + panic(fmt.Sprintf("value length %d larger than shared memory capacity %d", len(b), cap(v))) + } + m.header().valueLen = len(b) + copy(v[:cap(v)], b) +} + +// setValueLen sets the length of the shared memory buffer returned by valueRef +// to n, which may be at most the cap of that slice. +// +// Note that we can only store the length in the shared memory header. The full +// slice header contains a pointer, which is likely only valid for one process, +// since each process can map shared memory at a different virtual address. +func (m *sharedMem) setValueLen(n int) { + v := m.valueRef() + if n > cap(v) { + panic(fmt.Sprintf("length %d larger than shared memory capacity %d", n, cap(v))) + } + m.header().valueLen = n +} + +// TODO(jayconrod): add method to resize the buffer. We'll need that when the +// mutator can increase input length. Only the coordinator will be able to +// do it, since we'll need to send a message to the worker telling it to +// remap the file. diff --git a/testing/internal/fuzz/minimize.go b/testing/internal/fuzz/minimize.go new file mode 100644 index 0000000..0e410fb --- /dev/null +++ b/testing/internal/fuzz/minimize.go @@ -0,0 +1,95 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "reflect" +) + +func isMinimizable(t reflect.Type) bool { + return t == reflect.TypeOf("") || t == reflect.TypeOf([]byte(nil)) +} + +func minimizeBytes(v []byte, try func([]byte) bool, shouldStop func() bool) { + tmp := make([]byte, len(v)) + // If minimization was successful at any point during minimizeBytes, + // then the vals slice in (*workerServer).minimizeInput will point to + // tmp. Since tmp is altered while making new candidates, we need to + // make sure that it is equal to the correct value, v, before exiting + // this function. + defer copy(tmp, v) + + // First, try to cut the tail. + for n := 1024; n != 0; n /= 2 { + for len(v) > n { + if shouldStop() { + return + } + candidate := v[:len(v)-n] + if !try(candidate) { + break + } + // Set v to the new value to continue iterating. + v = candidate + } + } + + // Then, try to remove each individual byte. + for i := 0; i < len(v)-1; i++ { + if shouldStop() { + return + } + candidate := tmp[:len(v)-1] + copy(candidate[:i], v[:i]) + copy(candidate[i:], v[i+1:]) + if !try(candidate) { + continue + } + // Update v to delete the value at index i. + copy(v[i:], v[i+1:]) + v = v[:len(candidate)] + // v[i] is now different, so decrement i to redo this iteration + // of the loop with the new value. + i-- + } + + // Then, try to remove each possible subset of bytes. 
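+ // Each candidate drops the contiguous span v[i:j]; for example, [0 1 2 3]
+ // can shrink to [0 3] by removing v[1:3].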
+ for i := 0; i < len(v)-1; i++ { + copy(tmp, v[:i]) + for j := len(v); j > i+1; j-- { + if shouldStop() { + return + } + candidate := tmp[:len(v)-j+i] + copy(candidate[i:], v[j:]) + if !try(candidate) { + continue + } + // Update v and reset the loop with the new length. + copy(v[i:], v[j:]) + v = v[:len(candidate)] + j = len(v) + } + } + + // Then, try to make it more simplified and human-readable by trying to replace each + // byte with a printable character. + printableChars := []byte("012789ABCXYZabcxyz !\"#$%&'()*+,.") + for i, b := range v { + if shouldStop() { + return + } + + for _, pc := range printableChars { + v[i] = pc + if try(v) { + // Successful. Move on to the next byte in v. + break + } + // Unsuccessful. Revert v[i] back to original value. + v[i] = b + } + } +} diff --git a/testing/internal/fuzz/minimize_test.go b/testing/internal/fuzz/minimize_test.go new file mode 100644 index 0000000..2db2633 --- /dev/null +++ b/testing/internal/fuzz/minimize_test.go @@ -0,0 +1,182 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || freebsd || linux || windows + +package fuzz + +import ( + "bytes" + "context" + "errors" + "fmt" + "reflect" + "testing" + "time" + "unicode" + "unicode/utf8" +) + +func TestMinimizeInput(t *testing.T) { + type testcase struct { + name string + fn func(CorpusEntry) error + input []any + expected []any + } + cases := []testcase{ + { + name: "ones_byte", + fn: func(e CorpusEntry) error { + b := e.Values[0].([]byte) + ones := 0 + for _, v := range b { + if v == 1 { + ones++ + } + } + if ones == 3 { + return fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{[]byte{0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}, + expected: []any{[]byte{1, 1, 1}}, + }, + { + name: "single_bytes", + fn: func(e CorpusEntry) error { + b := e.Values[0].([]byte) + if len(b) < 2 { + return nil + } + if len(b) == 2 && b[0] == 1 && b[1] == 2 { + return nil + } + return fmt.Errorf("bad %v", e.Values[0]) + }, + input: []any{[]byte{1, 2, 3, 4, 5}}, + expected: []any{[]byte("00")}, + }, + { + name: "set_of_bytes", + fn: func(e CorpusEntry) error { + b := e.Values[0].([]byte) + if len(b) < 3 { + return nil + } + if bytes.Equal(b, []byte{0, 1, 2, 3, 4, 5}) || bytes.Equal(b, []byte{0, 4, 5}) { + return fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{[]byte{0, 1, 2, 3, 4, 5}}, + expected: []any{[]byte{0, 4, 5}}, + }, + { + name: "non_ascii_bytes", + fn: func(e CorpusEntry) error { + b := e.Values[0].([]byte) + if len(b) == 3 { + return fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{[]byte("ท")}, // ท is 3 bytes + expected: []any{[]byte("000")}, + }, + { + name: "ones_string", + fn: func(e CorpusEntry) error { + b := e.Values[0].(string) + ones := 0 + for _, v := range b { + if v == '1' { + ones++ + } + } + if ones == 3 { + return fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{"001010001000000000000000000"}, + expected: []any{"111"}, + }, + { + name: "string_length", + fn: func(e CorpusEntry) error { + b := e.Values[0].(string) + if len(b) == 5 { + return fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{"zzzzz"}, + expected: []any{"00000"}, + }, + { + name: "string_with_letter", + fn: func(e CorpusEntry) error { + b := e.Values[0].(string) + r, _ := utf8.DecodeRune([]byte(b)) + if unicode.IsLetter(r) { + return 
fmt.Errorf("bad %v", e.Values[0]) + } + return nil + }, + input: []any{"ZZZZZ"}, + expected: []any{"A"}, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + ws := &workerServer{ + fuzzFn: func(e CorpusEntry) (time.Duration, error) { + return time.Second, tc.fn(e) + }, + } + mem := &sharedMem{region: make([]byte, 100)} // big enough to hold value and header + vals := tc.input + success, err := ws.minimizeInput(context.Background(), vals, mem, minimizeArgs{}) + if !success { + t.Errorf("minimizeInput did not succeed") + } + if err == nil { + t.Fatal("minimizeInput didn't provide an error") + } + if expected := fmt.Sprintf("bad %v", tc.expected[0]); err.Error() != expected { + t.Errorf("unexpected error: got %q, want %q", err, expected) + } + if !reflect.DeepEqual(vals, tc.expected) { + t.Errorf("unexpected results: got %v, want %v", vals, tc.expected) + } + }) + } +} + +// TestMinimizeFlaky checks that if we're minimizing an interesting +// input and a flaky failure occurs, that minimization was not indicated +// to be successful, and the error isn't returned (since it's flaky). +func TestMinimizeFlaky(t *testing.T) { + ws := &workerServer{fuzzFn: func(e CorpusEntry) (time.Duration, error) { + return time.Second, errors.New("ohno") + }} + mem := &sharedMem{region: make([]byte, 100)} // big enough to hold value and header + vals := []any{[]byte(nil)} + args := minimizeArgs{KeepCoverage: make([]byte, len(coverageSnapshot))} + success, err := ws.minimizeInput(context.Background(), vals, mem, args) + if success { + t.Error("unexpected success") + } + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if count := mem.header().count; count != 1 { + t.Errorf("count: got %d, want 1", count) + } +} diff --git a/testing/internal/fuzz/mutator.go b/testing/internal/fuzz/mutator.go new file mode 100644 index 0000000..9bba0d6 --- /dev/null +++ b/testing/internal/fuzz/mutator.go @@ -0,0 +1,293 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "encoding/binary" + "fmt" + "math" + "unsafe" +) + +type mutator struct { + r mutatorRand + scratch []byte // scratch slice to avoid additional allocations +} + +func newMutator() *mutator { + return &mutator{r: newPcgRand()} +} + +func (m *mutator) rand(n int) int { + return m.r.intn(n) +} + +func (m *mutator) randByteOrder() binary.ByteOrder { + if m.r.bool() { + return binary.LittleEndian + } + return binary.BigEndian +} + +// chooseLen chooses length of range mutation in range [1,n]. It gives +// preference to shorter ranges. +func (m *mutator) chooseLen(n int) int { + switch x := m.rand(100); { + case x < 90: + return m.rand(min(8, n)) + 1 + case x < 99: + return m.rand(min(32, n)) + 1 + default: + return m.rand(n) + 1 + } +} + +// mutate performs several mutations on the provided values. +func (m *mutator) mutate(vals []any, maxBytes int) { + // TODO(katiehockman): pull some of these functions into helper methods and + // test that each case is working as expected. + // TODO(katiehockman): perform more types of mutations for []byte. + + // maxPerVal will represent the maximum number of bytes that each value be + // allowed after mutating, giving an equal amount of capacity to each line. + // Allow a little wiggle room for the encoding. + maxPerVal := maxBytes/len(vals) - 100 + + // Pick a random value to mutate. 
+ // TODO: consider mutating more than one value at a time. + i := m.rand(len(vals)) + switch v := vals[i].(type) { + case int: + vals[i] = int(m.mutateInt(int64(v), maxInt)) + case int8: + vals[i] = int8(m.mutateInt(int64(v), math.MaxInt8)) + case int16: + vals[i] = int16(m.mutateInt(int64(v), math.MaxInt16)) + case int64: + vals[i] = m.mutateInt(v, maxInt) + case uint: + vals[i] = uint(m.mutateUInt(uint64(v), maxUint)) + case uint16: + vals[i] = uint16(m.mutateUInt(uint64(v), math.MaxUint16)) + case uint32: + vals[i] = uint32(m.mutateUInt(uint64(v), math.MaxUint32)) + case uint64: + vals[i] = m.mutateUInt(v, maxUint) + case float32: + vals[i] = float32(m.mutateFloat(float64(v), math.MaxFloat32)) + case float64: + vals[i] = m.mutateFloat(v, math.MaxFloat64) + case bool: + if m.rand(2) == 1 { + vals[i] = !v // 50% chance of flipping the bool + } + case rune: // int32 + vals[i] = rune(m.mutateInt(int64(v), math.MaxInt32)) + case byte: // uint8 + vals[i] = byte(m.mutateUInt(uint64(v), math.MaxUint8)) + case string: + if len(v) > maxPerVal { + panic(fmt.Sprintf("cannot mutate bytes of length %d", len(v))) + } + if cap(m.scratch) < maxPerVal { + m.scratch = append(make([]byte, 0, maxPerVal), v...) + } else { + m.scratch = m.scratch[:len(v)] + copy(m.scratch, v) + } + m.mutateBytes(&m.scratch) + vals[i] = string(m.scratch) + case []byte: + if len(v) > maxPerVal { + panic(fmt.Sprintf("cannot mutate bytes of length %d", len(v))) + } + if cap(m.scratch) < maxPerVal { + m.scratch = append(make([]byte, 0, maxPerVal), v...) + } else { + m.scratch = m.scratch[:len(v)] + copy(m.scratch, v) + } + m.mutateBytes(&m.scratch) + vals[i] = m.scratch + default: + panic(fmt.Sprintf("type not supported for mutating: %T", vals[i])) + } +} + +func (m *mutator) mutateInt(v, maxValue int64) int64 { + var max int64 + for { + max = 100 + switch m.rand(2) { + case 0: + // Add a random number + if v >= maxValue { + continue + } + if v > 0 && maxValue-v < max { + // Don't let v exceed maxValue + max = maxValue - v + } + v += int64(1 + m.rand(int(max))) + return v + case 1: + // Subtract a random number + if v <= -maxValue { + continue + } + if v < 0 && maxValue+v < max { + // Don't let v drop below -maxValue + max = maxValue + v + } + v -= int64(1 + m.rand(int(max))) + return v + } + } +} + +func (m *mutator) mutateUInt(v, maxValue uint64) uint64 { + var max uint64 + for { + max = 100 + switch m.rand(2) { + case 0: + // Add a random number + if v >= maxValue { + continue + } + if v > 0 && maxValue-v < max { + // Don't let v exceed maxValue + max = maxValue - v + } + + v += uint64(1 + m.rand(int(max))) + return v + case 1: + // Subtract a random number + if v <= 0 { + continue + } + if v < max { + // Don't let v drop below 0 + max = v + } + v -= uint64(1 + m.rand(int(max))) + return v + } + } +} + +func (m *mutator) mutateFloat(v, maxValue float64) float64 { + var max float64 + for { + switch m.rand(4) { + case 0: + // Add a random number + if v >= maxValue { + continue + } + max = 100 + if v > 0 && maxValue-v < max { + // Don't let v exceed maxValue + max = maxValue - v + } + v += float64(1 + m.rand(int(max))) + return v + case 1: + // Subtract a random number + if v <= -maxValue { + continue + } + max = 100 + if v < 0 && maxValue+v < max { + // Don't let v drop below -maxValue + max = maxValue + v + } + v -= float64(1 + m.rand(int(max))) + return v + case 2: + // Multiply by a random number + absV := math.Abs(v) + if v == 0 || absV >= maxValue { + continue + } + max = 10 + if maxValue/absV < max { + // Don't let v go 
beyond the minimum or maximum value + max = maxValue / absV + } + v *= float64(1 + m.rand(int(max))) + return v + case 3: + // Divide by a random number + if v == 0 { + continue + } + v /= float64(1 + m.rand(10)) + return v + } + } +} + +type byteSliceMutator func(*mutator, []byte) []byte + +var byteSliceMutators = []byteSliceMutator{ + byteSliceRemoveBytes, + byteSliceInsertRandomBytes, + byteSliceDuplicateBytes, + byteSliceOverwriteBytes, + byteSliceBitFlip, + byteSliceXORByte, + byteSliceSwapByte, + byteSliceArithmeticUint8, + byteSliceArithmeticUint16, + byteSliceArithmeticUint32, + byteSliceArithmeticUint64, + byteSliceOverwriteInterestingUint8, + byteSliceOverwriteInterestingUint16, + byteSliceOverwriteInterestingUint32, + byteSliceInsertConstantBytes, + byteSliceOverwriteConstantBytes, + byteSliceShuffleBytes, + byteSliceSwapBytes, +} + +func (m *mutator) mutateBytes(ptrB *[]byte) { + b := *ptrB + defer func() { + if unsafe.SliceData(*ptrB) != unsafe.SliceData(b) { + panic("data moved to new address") + } + *ptrB = b + }() + + for { + mut := byteSliceMutators[m.rand(len(byteSliceMutators))] + if mutated := mut(m, b); mutated != nil { + b = mutated + return + } + } +} + +var ( + interesting8 = []int8{-128, -1, 0, 1, 16, 32, 64, 100, 127} + interesting16 = []int16{-32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767} + interesting32 = []int32{-2147483648, -100663046, -32769, 32768, 65535, 65536, 100663045, 2147483647} +) + +const ( + maxUint = uint64(^uint(0)) + maxInt = int64(maxUint >> 1) +) + +func init() { + for _, v := range interesting8 { + interesting16 = append(interesting16, int16(v)) + } + for _, v := range interesting16 { + interesting32 = append(interesting32, int32(v)) + } +} diff --git a/testing/internal/fuzz/mutator_test.go b/testing/internal/fuzz/mutator_test.go new file mode 100644 index 0000000..cea7e2e --- /dev/null +++ b/testing/internal/fuzz/mutator_test.go @@ -0,0 +1,117 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fuzz + +import ( + "bytes" + "fmt" + "os" + "strconv" + "testing" +) + +func BenchmarkMutatorBytes(b *testing.B) { + origEnv := os.Getenv("GODEBUG") + defer func() { os.Setenv("GODEBUG", origEnv) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv)) + m := newMutator() + + for _, size := range []int{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + } { + b.Run(strconv.Itoa(size), func(b *testing.B) { + buf := make([]byte, size) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // resize buffer to the correct shape and reset the PCG + buf = buf[0:size] + m.r = newPcgRand() + m.mutate([]any{buf}, workerSharedMemSize) + } + }) + } +} + +func BenchmarkMutatorString(b *testing.B) { + origEnv := os.Getenv("GODEBUG") + defer func() { os.Setenv("GODEBUG", origEnv) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv)) + m := newMutator() + + for _, size := range []int{ + 1, + 10, + 100, + 1000, + 10000, + 100000, + } { + b.Run(strconv.Itoa(size), func(b *testing.B) { + buf := make([]byte, size) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + // resize buffer to the correct shape and reset the PCG + buf = buf[0:size] + m.r = newPcgRand() + m.mutate([]any{string(buf)}, workerSharedMemSize) + } + }) + } +} + +func BenchmarkMutatorAllBasicTypes(b *testing.B) { + origEnv := os.Getenv("GODEBUG") + defer func() { os.Setenv("GODEBUG", origEnv) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv)) + m := newMutator() + + types := []any{ + []byte(""), + string(""), + false, + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + } + + for _, t := range types { + b.Run(fmt.Sprintf("%T", t), func(b *testing.B) { + for i := 0; i < b.N; i++ { + m.r = newPcgRand() + m.mutate([]any{t}, workerSharedMemSize) + } + }) + } +} + +func TestStringImmutability(t *testing.T) { + v := []any{"hello"} + m := newMutator() + m.mutate(v, 1024) + original := v[0].(string) + originalCopy := make([]byte, len(original)) + copy(originalCopy, []byte(original)) + for i := 0; i < 25; i++ { + m.mutate(v, 1024) + } + if !bytes.Equal([]byte(original), originalCopy) { + t.Fatalf("string was mutated: got %x, want %x", []byte(original), originalCopy) + } +} diff --git a/testing/internal/fuzz/mutators_byteslice.go b/testing/internal/fuzz/mutators_byteslice.go new file mode 100644 index 0000000..d9dab1d --- /dev/null +++ b/testing/internal/fuzz/mutators_byteslice.go @@ -0,0 +1,313 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +// byteSliceRemoveBytes removes a random chunk of bytes from b. +func byteSliceRemoveBytes(m *mutator, b []byte) []byte { + if len(b) <= 1 { + return nil + } + pos0 := m.rand(len(b)) + pos1 := pos0 + m.chooseLen(len(b)-pos0) + copy(b[pos0:], b[pos1:]) + b = b[:len(b)-(pos1-pos0)] + return b +} + +// byteSliceInsertRandomBytes inserts a chunk of random bytes into b at a random +// position. +func byteSliceInsertRandomBytes(m *mutator, b []byte) []byte { + pos := m.rand(len(b) + 1) + n := m.chooseLen(1024) + if len(b)+n >= cap(b) { + return nil + } + b = b[:len(b)+n] + copy(b[pos+n:], b[pos:]) + for i := 0; i < n; i++ { + b[pos+i] = byte(m.rand(256)) + } + return b +} + +// byteSliceDuplicateBytes duplicates a chunk of bytes in b and inserts it into +// a random position. 
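+// The duplicated block is staged in spare capacity at the end of the slice,
+// so b must have room for the block twice over; e.g. duplicating n=4 bytes
+// from src=0 into dst=1 turns [1 2 3 4] into [1 1 2 3 4 2 3 4].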
+func byteSliceDuplicateBytes(m *mutator, b []byte) []byte { + if len(b) <= 1 { + return nil + } + src := m.rand(len(b)) + dst := m.rand(len(b)) + for dst == src { + dst = m.rand(len(b)) + } + n := m.chooseLen(len(b) - src) + // Use the end of the slice as scratch space to avoid doing an + // allocation. If the slice is too small abort and try something + // else. + if len(b)+(n*2) >= cap(b) { + return nil + } + end := len(b) + // Increase the size of b to fit the duplicated block as well as + // some extra working space + b = b[:end+(n*2)] + // Copy the block of bytes we want to duplicate to the end of the + // slice + copy(b[end+n:], b[src:src+n]) + // Shift the bytes after the splice point n positions to the right + // to make room for the new block + copy(b[dst+n:end+n], b[dst:end]) + // Insert the duplicate block into the splice point + copy(b[dst:], b[end+n:]) + b = b[:end+n] + return b +} + +// byteSliceOverwriteBytes overwrites a chunk of b with another chunk of b. +func byteSliceOverwriteBytes(m *mutator, b []byte) []byte { + if len(b) <= 1 { + return nil + } + src := m.rand(len(b)) + dst := m.rand(len(b)) + for dst == src { + dst = m.rand(len(b)) + } + n := m.chooseLen(len(b) - src - 1) + copy(b[dst:], b[src:src+n]) + return b +} + +// byteSliceBitFlip flips a random bit in a random byte in b. +func byteSliceBitFlip(m *mutator, b []byte) []byte { + if len(b) == 0 { + return nil + } + pos := m.rand(len(b)) + b[pos] ^= 1 << uint(m.rand(8)) + return b +} + +// byteSliceXORByte XORs a random byte in b with a random value. +func byteSliceXORByte(m *mutator, b []byte) []byte { + if len(b) == 0 { + return nil + } + pos := m.rand(len(b)) + // In order to avoid a no-op (where the random value matches + // the existing value), use XOR instead of just setting to + // the random value. + b[pos] ^= byte(1 + m.rand(255)) + return b +} + +// byteSliceSwapByte swaps two random bytes in b. +func byteSliceSwapByte(m *mutator, b []byte) []byte { + if len(b) <= 1 { + return nil + } + src := m.rand(len(b)) + dst := m.rand(len(b)) + for dst == src { + dst = m.rand(len(b)) + } + b[src], b[dst] = b[dst], b[src] + return b +} + +// byteSliceArithmeticUint8 adds/subtracts from a random byte in b. +func byteSliceArithmeticUint8(m *mutator, b []byte) []byte { + if len(b) == 0 { + return nil + } + pos := m.rand(len(b)) + v := byte(m.rand(35) + 1) + if m.r.bool() { + b[pos] += v + } else { + b[pos] -= v + } + return b +} + +// byteSliceArithmeticUint16 adds/subtracts from a random uint16 in b. +func byteSliceArithmeticUint16(m *mutator, b []byte) []byte { + if len(b) < 2 { + return nil + } + v := uint16(m.rand(35) + 1) + if m.r.bool() { + v = 0 - v + } + pos := m.rand(len(b) - 1) + enc := m.randByteOrder() + enc.PutUint16(b[pos:], enc.Uint16(b[pos:])+v) + return b +} + +// byteSliceArithmeticUint32 adds/subtracts from a random uint32 in b. +func byteSliceArithmeticUint32(m *mutator, b []byte) []byte { + if len(b) < 4 { + return nil + } + v := uint32(m.rand(35) + 1) + if m.r.bool() { + v = 0 - v + } + pos := m.rand(len(b) - 3) + enc := m.randByteOrder() + enc.PutUint32(b[pos:], enc.Uint32(b[pos:])+v) + return b +} + +// byteSliceArithmeticUint64 adds/subtracts from a random uint64 in b. 
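+// As in the 16- and 32-bit variants above, subtraction is folded into the
+// addition: negating v relies on unsigned wraparound, so adding 0-v
+// subtracts v.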
+func byteSliceArithmeticUint64(m *mutator, b []byte) []byte {
+	if len(b) < 8 {
+		return nil
+	}
+	v := uint64(m.rand(35) + 1)
+	if m.r.bool() {
+		v = 0 - v
+	}
+	pos := m.rand(len(b) - 7)
+	enc := m.randByteOrder()
+	enc.PutUint64(b[pos:], enc.Uint64(b[pos:])+v)
+	return b
+}
+
+// byteSliceOverwriteInterestingUint8 overwrites a random byte in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint8(m *mutator, b []byte) []byte {
+	if len(b) == 0 {
+		return nil
+	}
+	pos := m.rand(len(b))
+	b[pos] = byte(interesting8[m.rand(len(interesting8))])
+	return b
+}
+
+// byteSliceOverwriteInterestingUint16 overwrites a random uint16 in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint16(m *mutator, b []byte) []byte {
+	if len(b) < 2 {
+		return nil
+	}
+	pos := m.rand(len(b) - 1)
+	v := uint16(interesting16[m.rand(len(interesting16))])
+	m.randByteOrder().PutUint16(b[pos:], v)
+	return b
+}
+
+// byteSliceOverwriteInterestingUint32 overwrites a random uint32 in b with an interesting
+// value.
+func byteSliceOverwriteInterestingUint32(m *mutator, b []byte) []byte {
+	if len(b) < 4 {
+		return nil
+	}
+	pos := m.rand(len(b) - 3)
+	v := uint32(interesting32[m.rand(len(interesting32))])
+	m.randByteOrder().PutUint32(b[pos:], v)
+	return b
+}
+
+// byteSliceInsertConstantBytes inserts a chunk of constant bytes into a random position in b.
+func byteSliceInsertConstantBytes(m *mutator, b []byte) []byte {
+	if len(b) <= 1 {
+		return nil
+	}
+	dst := m.rand(len(b))
+	// TODO(rolandshoemaker,katiehockman): 4096 was mainly picked
+	// randomly. We may want to either pick a much larger value
+	// (AFL uses 32768, paired with a similar impl to chooseLen
+	// which biases towards smaller lengths that grow over time),
+	// or set the max based on characteristics of the corpus
+	// (libFuzzer sets a min/max based on the min/max size of
+	// entries in the corpus and then picks uniformly from
+	// that range).
+	n := m.chooseLen(4096)
+	if len(b)+n >= cap(b) {
+		return nil
+	}
+	b = b[:len(b)+n]
+	copy(b[dst+n:], b[dst:])
+	rb := byte(m.rand(256))
+	for i := dst; i < dst+n; i++ {
+		b[i] = rb
+	}
+	return b
+}
+
+// byteSliceOverwriteConstantBytes overwrites a chunk of b with constant bytes.
+func byteSliceOverwriteConstantBytes(m *mutator, b []byte) []byte {
+	if len(b) <= 1 {
+		return nil
+	}
+	dst := m.rand(len(b))
+	n := m.chooseLen(len(b) - dst)
+	rb := byte(m.rand(256))
+	for i := dst; i < dst+n; i++ {
+		b[i] = rb
+	}
+	return b
+}
+
+// byteSliceShuffleBytes shuffles a chunk of bytes in b.
+func byteSliceShuffleBytes(m *mutator, b []byte) []byte {
+	if len(b) <= 1 {
+		return nil
+	}
+	dst := m.rand(len(b))
+	n := m.chooseLen(len(b) - dst)
+	if n <= 2 {
+		return nil
+	}
+	// Start at the end of the range, and iterate backwards
+	// to dst, swapping each element with another element in
+	// dst:dst+n (Fisher-Yates shuffle).
+	for i := n - 1; i > 0; i-- {
+		j := m.rand(i + 1)
+		b[dst+i], b[dst+j] = b[dst+j], b[dst+i]
+	}
+	return b
+}
+
+// byteSliceSwapBytes swaps two chunks of bytes in b.
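+// The chunks must not overlap, since overlapping copies would duplicate
+// bytes rather than swap them, and spare capacity at the end of b is used
+// as scratch space for the three-way copy; if either condition can't be
+// met, the mutator returns nil so a different mutation is chosen.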
+func byteSliceSwapBytes(m *mutator, b []byte) []byte {
+	if len(b) <= 1 {
+		return nil
+	}
+	src := m.rand(len(b))
+	dst := m.rand(len(b))
+	for dst == src {
+		dst = m.rand(len(b))
+	}
+	// Choose the random length as len(b) - max(src, dst)
+	// so that we don't attempt to swap a chunk that extends
+	// beyond the end of the slice.
+	max := dst
+	if src > max {
+		max = src
+	}
+	n := m.chooseLen(len(b) - max - 1)
+	// Check that the two chunks don't intersect, so that we don't end up
+	// duplicating parts of the input rather than swapping them.
+	if src > dst && dst+n >= src || dst > src && src+n >= dst {
+		return nil
+	}
+	// Use the end of the slice as scratch space to avoid doing an
+	// allocation. If the slice is too small abort and try something
+	// else.
+	if len(b)+n >= cap(b) {
+		return nil
+	}
+	end := len(b)
+	b = b[:end+n]
+	copy(b[end:], b[dst:dst+n])
+	copy(b[dst:], b[src:src+n])
+	copy(b[src:], b[end:])
+	b = b[:end]
+	return b
+}
diff --git a/testing/internal/fuzz/mutators_byteslice_test.go b/testing/internal/fuzz/mutators_byteslice_test.go
new file mode 100644
index 0000000..b12ef6c
--- /dev/null
+++ b/testing/internal/fuzz/mutators_byteslice_test.go
@@ -0,0 +1,221 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+)
+
+type mockRand struct {
+	values  []int
+	counter int
+	b       bool
+}
+
+func (mr *mockRand) uint32() uint32 {
+	c := mr.values[mr.counter]
+	mr.counter++
+	return uint32(c)
+}
+
+func (mr *mockRand) intn(n int) int {
+	c := mr.values[mr.counter]
+	mr.counter++
+	return c % n
+}
+
+func (mr *mockRand) uint32n(n uint32) uint32 {
+	c := mr.values[mr.counter]
+	mr.counter++
+	return uint32(c) % n
+}
+
+func (mr *mockRand) bool() bool {
+	b := mr.b
+	mr.b = !mr.b
+	return b
+}
+
+func (mr *mockRand) save(*uint64, *uint64) {
+	panic("unimplemented")
+}
+
+func (mr *mockRand) restore(uint64, uint64) {
+	panic("unimplemented")
+}
+
+func TestByteSliceMutators(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		mutator  func(*mutator, []byte) []byte
+		randVals []int
+		input    []byte
+		expected []byte
+	}{
+		{
+			name:     "byteSliceRemoveBytes",
+			mutator:  byteSliceRemoveBytes,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{4},
+		},
+		{
+			name:     "byteSliceInsertRandomBytes",
+			mutator:  byteSliceInsertRandomBytes,
+			input:    make([]byte, 4, 8),
+			expected: []byte{3, 4, 5, 0, 0, 0, 0},
+		},
+		{
+			name:     "byteSliceDuplicateBytes",
+			mutator:  byteSliceDuplicateBytes,
+			input:    append(make([]byte, 0, 13), []byte{1, 2, 3, 4}...),
+			expected: []byte{1, 1, 2, 3, 4, 2, 3, 4},
+		},
+		{
+			name:     "byteSliceOverwriteBytes",
+			mutator:  byteSliceOverwriteBytes,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{1, 1, 3, 4},
+		},
+		{
+			name:     "byteSliceBitFlip",
+			mutator:  byteSliceBitFlip,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{3, 2, 3, 4},
+		},
+		{
+			name:     "byteSliceXORByte",
+			mutator:  byteSliceXORByte,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{3, 2, 3, 4},
+		},
+		{
+			name:     "byteSliceSwapByte",
+			mutator:  byteSliceSwapByte,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{2, 1, 3, 4},
+		},
+		{
+			name:     "byteSliceArithmeticUint8",
+			mutator:  byteSliceArithmeticUint8,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{255, 2, 3, 4},
+		},
+		{
+			name:     "byteSliceArithmeticUint16",
+			mutator:  byteSliceArithmeticUint16,
+			input:    []byte{1, 2, 3, 4},
+			expected: []byte{1, 3, 3, 4},
+		},
+		{
+			name:
"byteSliceArithmeticUint32", + mutator: byteSliceArithmeticUint32, + input: []byte{1, 2, 3, 4}, + expected: []byte{2, 2, 3, 4}, + }, + { + name: "byteSliceArithmeticUint64", + mutator: byteSliceArithmeticUint64, + input: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + expected: []byte{2, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "byteSliceOverwriteInterestingUint8", + mutator: byteSliceOverwriteInterestingUint8, + input: []byte{1, 2, 3, 4}, + expected: []byte{255, 2, 3, 4}, + }, + { + name: "byteSliceOverwriteInterestingUint16", + mutator: byteSliceOverwriteInterestingUint16, + input: []byte{1, 2, 3, 4}, + expected: []byte{255, 127, 3, 4}, + }, + { + name: "byteSliceOverwriteInterestingUint32", + mutator: byteSliceOverwriteInterestingUint32, + input: []byte{1, 2, 3, 4}, + expected: []byte{250, 0, 0, 250}, + }, + { + name: "byteSliceInsertConstantBytes", + mutator: byteSliceInsertConstantBytes, + input: append(make([]byte, 0, 8), []byte{1, 2, 3, 4}...), + expected: []byte{3, 3, 3, 1, 2, 3, 4}, + }, + { + name: "byteSliceOverwriteConstantBytes", + mutator: byteSliceOverwriteConstantBytes, + input: []byte{1, 2, 3, 4}, + expected: []byte{3, 3, 3, 4}, + }, + { + name: "byteSliceShuffleBytes", + mutator: byteSliceShuffleBytes, + input: []byte{1, 2, 3, 4}, + expected: []byte{2, 3, 1, 4}, + }, + { + name: "byteSliceSwapBytes", + mutator: byteSliceSwapBytes, + randVals: []int{0, 2, 0, 2}, + input: append(make([]byte, 0, 9), []byte{1, 2, 3, 4}...), + expected: []byte{3, 2, 1, 4}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + r := &mockRand{values: []int{0, 1, 2, 3, 4, 5}} + if tc.randVals != nil { + r.values = tc.randVals + } + m := &mutator{r: r} + b := tc.mutator(m, tc.input) + if !bytes.Equal(b, tc.expected) { + t.Errorf("got %x, want %x", b, tc.expected) + } + }) + } +} + +func BenchmarkByteSliceMutators(b *testing.B) { + tests := [...]struct { + name string + mutator func(*mutator, []byte) []byte + }{ + {"RemoveBytes", byteSliceRemoveBytes}, + {"InsertRandomBytes", byteSliceInsertRandomBytes}, + {"DuplicateBytes", byteSliceDuplicateBytes}, + {"OverwriteBytes", byteSliceOverwriteBytes}, + {"BitFlip", byteSliceBitFlip}, + {"XORByte", byteSliceXORByte}, + {"SwapByte", byteSliceSwapByte}, + {"ArithmeticUint8", byteSliceArithmeticUint8}, + {"ArithmeticUint16", byteSliceArithmeticUint16}, + {"ArithmeticUint32", byteSliceArithmeticUint32}, + {"ArithmeticUint64", byteSliceArithmeticUint64}, + {"OverwriteInterestingUint8", byteSliceOverwriteInterestingUint8}, + {"OverwriteInterestingUint16", byteSliceOverwriteInterestingUint16}, + {"OverwriteInterestingUint32", byteSliceOverwriteInterestingUint32}, + {"InsertConstantBytes", byteSliceInsertConstantBytes}, + {"OverwriteConstantBytes", byteSliceOverwriteConstantBytes}, + {"ShuffleBytes", byteSliceShuffleBytes}, + {"SwapBytes", byteSliceSwapBytes}, + } + + for _, tc := range tests { + b.Run(tc.name, func(b *testing.B) { + for size := 64; size <= 1024; size *= 2 { + b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { + m := &mutator{r: newPcgRand()} + input := make([]byte, size) + for i := 0; i < b.N; i++ { + tc.mutator(m, input) + } + }) + } + }) + } +} diff --git a/testing/internal/fuzz/pcg.go b/testing/internal/fuzz/pcg.go new file mode 100644 index 0000000..b825104 --- /dev/null +++ b/testing/internal/fuzz/pcg.go @@ -0,0 +1,139 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fuzz + +import ( + "math/bits" + "os" + "strconv" + "strings" + "sync/atomic" + "time" +) + +type mutatorRand interface { + uint32() uint32 + intn(int) int + uint32n(uint32) uint32 + bool() bool + + save(randState, randInc *uint64) + restore(randState, randInc uint64) +} + +// The functions in pcg implement a 32 bit PRNG with a 64 bit period: pcg xsh rr +// 64 32. See https://www.pcg-random.org/ for more information. This +// implementation is geared specifically towards the needs of fuzzing: Simple +// creation and use, no reproducibility, no concurrency safety, just the +// necessary methods, optimized for speed. + +var globalInc atomic.Uint64 // PCG stream + +const multiplier uint64 = 6364136223846793005 + +// pcgRand is a PRNG. It should not be copied or shared. No Rand methods are +// concurrency safe. +type pcgRand struct { + noCopy noCopy // help avoid mistakes: ask vet to ensure that we don't make a copy + state uint64 + inc uint64 +} + +func godebugSeed() *int { + debug := strings.Split(os.Getenv("GODEBUG"), ",") + for _, f := range debug { + if strings.HasPrefix(f, "fuzzseed=") { + seed, err := strconv.Atoi(strings.TrimPrefix(f, "fuzzseed=")) + if err != nil { + panic("malformed fuzzseed") + } + return &seed + } + } + return nil +} + +// newPcgRand generates a new, seeded Rand, ready for use. +func newPcgRand() *pcgRand { + r := new(pcgRand) + now := uint64(time.Now().UnixNano()) + if seed := godebugSeed(); seed != nil { + now = uint64(*seed) + } + inc := globalInc.Add(1) + r.state = now + r.inc = (inc << 1) | 1 + r.step() + r.state += now + r.step() + return r +} + +func (r *pcgRand) step() { + r.state *= multiplier + r.state += r.inc +} + +func (r *pcgRand) save(randState, randInc *uint64) { + *randState = r.state + *randInc = r.inc +} + +func (r *pcgRand) restore(randState, randInc uint64) { + r.state = randState + r.inc = randInc +} + +// uint32 returns a pseudo-random uint32. +func (r *pcgRand) uint32() uint32 { + x := r.state + r.step() + return bits.RotateLeft32(uint32(((x>>18)^x)>>27), -int(x>>59)) +} + +// intn returns a pseudo-random number in [0, n). +// n must fit in a uint32. +func (r *pcgRand) intn(n int) int { + if int(uint32(n)) != n { + panic("large Intn") + } + return int(r.uint32n(uint32(n))) +} + +// uint32n returns a pseudo-random number in [0, n). +// +// For implementation details, see: +// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction +// https://lemire.me/blog/2016/06/30/fast-random-shuffling +func (r *pcgRand) uint32n(n uint32) uint32 { + v := r.uint32() + prod := uint64(v) * uint64(n) + low := uint32(prod) + if low < n { + thresh := uint32(-int32(n)) % n + for low < thresh { + v = r.uint32() + prod = uint64(v) * uint64(n) + low = uint32(prod) + } + } + return uint32(prod >> 32) +} + +// bool generates a random bool. +func (r *pcgRand) bool() bool { + return r.uint32()&1 == 0 +} + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/testing/internal/fuzz/queue.go b/testing/internal/fuzz/queue.go new file mode 100644 index 0000000..195d6eb --- /dev/null +++ b/testing/internal/fuzz/queue.go @@ -0,0 +1,71 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+// queue holds a growable sequence of inputs for fuzzing and minimization.
+//
+// For now, this is a simple ring buffer
+// (https://en.wikipedia.org/wiki/Circular_buffer).
+//
+// TODO(golang.org/issue/46224): use a prioritization algorithm based on input
+// size, previous duration, coverage, and any other metrics that seem useful.
type queue struct {
+	// elems holds a ring buffer.
+	// The queue is empty when len = 0, and full (until grow is called)
+	// when len = cap(elems). head is the index of the oldest element.
+	elems     []any
+	head, len int
+}
+
+func (q *queue) cap() int {
+	return len(q.elems)
+}
+
+func (q *queue) grow() {
+	oldCap := q.cap()
+	newCap := oldCap * 2
+	if newCap == 0 {
+		newCap = 8
+	}
+	newElems := make([]any, newCap)
+	oldLen := q.len
+	for i := 0; i < oldLen; i++ {
+		newElems[i] = q.elems[(q.head+i)%oldCap]
+	}
+	q.elems = newElems
+	q.head = 0
+}
+
+func (q *queue) enqueue(e any) {
+	if q.len+1 > q.cap() {
+		q.grow()
+	}
+	i := (q.head + q.len) % q.cap()
+	q.elems[i] = e
+	q.len++
+}
+
+func (q *queue) dequeue() (any, bool) {
+	if q.len == 0 {
+		return nil, false
+	}
+	e := q.elems[q.head]
+	q.elems[q.head] = nil
+	q.head = (q.head + 1) % q.cap()
+	q.len--
+	return e, true
+}
+
+func (q *queue) peek() (any, bool) {
+	if q.len == 0 {
+		return nil, false
+	}
+	return q.elems[q.head], true
+}
+
+func (q *queue) clear() {
+	*q = queue{}
+}
diff --git a/testing/internal/fuzz/queue_test.go b/testing/internal/fuzz/queue_test.go
new file mode 100644
index 0000000..3b179af
--- /dev/null
+++ b/testing/internal/fuzz/queue_test.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fuzz
+
+import "testing"
+
+func TestQueue(t *testing.T) {
+	// Zero valued queue should have 0 length and capacity.
+	var q queue
+	if n := q.len; n != 0 {
+		t.Fatalf("empty queue has len %d; want 0", n)
+	}
+	if n := q.cap(); n != 0 {
+		t.Fatalf("empty queue has cap %d; want 0", n)
+	}
+
+	// As we add elements, len should grow.
+	N := 32
+	for i := 0; i < N; i++ {
+		q.enqueue(i)
+		if n := q.len; n != i+1 {
+			t.Fatalf("after adding %d elements, queue has len %d", i, n)
+		}
+		if v, ok := q.peek(); !ok {
+			t.Fatalf("couldn't peek after adding %d elements", i)
+		} else if v.(int) != 0 {
+			t.Fatalf("after adding %d elements, peek is %d; want 0", i, v)
+		}
+	}
+
+	// As we remove and add elements, len should shrink and grow.
+	// We should also remove elements in the same order they were added.
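+	// Dequeue batches of varying size and re-enqueue the removed elements,
+	// so that head advances past the end of the backing array and the
+	// modular index arithmetic is exercised.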
+	want := 0
+	for _, r := range []int{1, 2, 3, 5, 8, 13, 21} {
+		s := make([]int, 0, r)
+		for i := 0; i < r; i++ {
+			if got, ok := q.dequeue(); !ok {
+				t.Fatalf("after removing %d of %d elements, could not dequeue", i+1, r)
+			} else if got != want {
+				t.Fatalf("after removing %d of %d elements, got %d; want %d", i+1, r, got, want)
+			} else {
+				s = append(s, got.(int))
+			}
+			want = (want + 1) % N
+			if n := q.len; n != N-i-1 {
+				t.Fatalf("after removing %d of %d elements, len is %d; want %d", i+1, r, n, N-i-1)
+			}
+		}
+		for i, v := range s {
+			q.enqueue(v)
+			if n := q.len; n != N-r+i+1 {
+				t.Fatalf("after adding back %d of %d elements, len is %d; want %d", i+1, r, n, N-r+i+1)
+			}
+		}
+	}
+}
diff --git a/testing/internal/fuzz/sys_posix.go b/testing/internal/fuzz/sys_posix.go
new file mode 100644
index 0000000..fec6054
--- /dev/null
+++ b/testing/internal/fuzz/sys_posix.go
@@ -0,0 +1,130 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd || linux
+
+package fuzz
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"syscall"
+)
+
+type sharedMemSys struct{}
+
+func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (*sharedMem, error) {
+	prot := syscall.PROT_READ | syscall.PROT_WRITE
+	flags := syscall.MAP_FILE | syscall.MAP_SHARED
+	region, err := syscall.Mmap(int(f.Fd()), 0, size, prot, flags)
+	if err != nil {
+		return nil, err
+	}
+
+	return &sharedMem{f: f, region: region, removeOnClose: removeOnClose}, nil
+}
+
+// Close unmaps the shared memory and closes the temporary file. If this
+// sharedMem was created with sharedMemTempFile, Close also removes the file.
+func (m *sharedMem) Close() error {
+	// Attempt all operations, even if we get an error for an earlier operation.
+	// os.File.Close may fail due to I/O errors, but we still want to delete
+	// the temporary file.
+	var errs []error
+	errs = append(errs,
+		syscall.Munmap(m.region),
+		m.f.Close())
+	if m.removeOnClose {
+		errs = append(errs, os.Remove(m.f.Name()))
+	}
+	for _, err := range errs {
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// setWorkerComm configures communication channels on the cmd that will
+// run a worker process.
+func setWorkerComm(cmd *exec.Cmd, comm workerComm) {
+	mem := <-comm.memMu
+	memFile := mem.f
+	comm.memMu <- mem
+	cmd.ExtraFiles = []*os.File{comm.fuzzIn, comm.fuzzOut, memFile}
+}
+
+// getWorkerComm returns communication channels in the worker process.
+func getWorkerComm() (comm workerComm, err error) {
+	fuzzIn := os.NewFile(3, "fuzz_in")
+	fuzzOut := os.NewFile(4, "fuzz_out")
+	memFile := os.NewFile(5, "fuzz_mem")
+	fi, err := memFile.Stat()
+	if err != nil {
+		return workerComm{}, err
+	}
+	size := int(fi.Size())
+	if int64(size) != fi.Size() {
+		return workerComm{}, fmt.Errorf("fuzz temp file exceeds maximum size")
+	}
+	removeOnClose := false
+	mem, err := sharedMemMapFile(memFile, size, removeOnClose)
+	if err != nil {
+		return workerComm{}, err
+	}
+	memMu := make(chan *sharedMem, 1)
+	memMu <- mem
+	return workerComm{fuzzIn: fuzzIn, fuzzOut: fuzzOut, memMu: memMu}, nil
+}
+
+// isInterruptError returns whether an error was returned by a process that
+// was terminated by an interrupt signal (SIGINT).
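+// A process killed by a signal has no ordinary exit status, so ExitCode
+// reports -1 in that case; the ExitCode() >= 0 check below filters out
+// normal exits before the wait status is inspected for SIGINT.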
+func isInterruptError(err error) bool { + exitErr, ok := err.(*exec.ExitError) + if !ok || exitErr.ExitCode() >= 0 { + return false + } + status := exitErr.Sys().(syscall.WaitStatus) + return status.Signal() == syscall.SIGINT +} + +// terminationSignal checks if err is an exec.ExitError with a signal status. +// If it is, terminationSignal returns the signal and true. +// If not, -1 and false. +func terminationSignal(err error) (os.Signal, bool) { + exitErr, ok := err.(*exec.ExitError) + if !ok || exitErr.ExitCode() >= 0 { + return syscall.Signal(-1), false + } + status := exitErr.Sys().(syscall.WaitStatus) + return status.Signal(), status.Signaled() +} + +// isCrashSignal returns whether a signal was likely to have been caused by an +// error in the program that received it, triggered by a fuzz input. For +// example, SIGSEGV would be received after a nil pointer dereference. +// Other signals like SIGKILL or SIGHUP are more likely to have been sent by +// another process, and we shouldn't record a crasher if the worker process +// receives one of these. +// +// Note that Go installs its own signal handlers on startup, so some of these +// signals may only be received if signal handlers are changed. For example, +// SIGSEGV is normally transformed into a panic that causes the process to exit +// with status 2 if not recovered, which we handle as a crash. +func isCrashSignal(signal os.Signal) bool { + switch signal { + case + syscall.SIGILL, // illegal instruction + syscall.SIGTRAP, // breakpoint + syscall.SIGABRT, // abort() called + syscall.SIGBUS, // invalid memory access (e.g., misaligned address) + syscall.SIGFPE, // math error, e.g., integer divide by zero + syscall.SIGSEGV, // invalid memory access (e.g., write to read-only) + syscall.SIGPIPE: // sent data to closed pipe or socket + return true + default: + return false + } +} diff --git a/testing/internal/fuzz/sys_unimplemented.go b/testing/internal/fuzz/sys_unimplemented.go new file mode 100644 index 0000000..8687c1f --- /dev/null +++ b/testing/internal/fuzz/sys_unimplemented.go @@ -0,0 +1,44 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// If you update this constraint, also update internal/platform.FuzzSupported. +// +//go:build !darwin && !freebsd && !linux && !windows + +package fuzz + +import ( + "os" + "os/exec" +) + +type sharedMemSys struct{} + +func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (*sharedMem, error) { + panic("not implemented") +} + +func (m *sharedMem) Close() error { + panic("not implemented") +} + +func setWorkerComm(cmd *exec.Cmd, comm workerComm) { + panic("not implemented") +} + +func getWorkerComm() (comm workerComm, err error) { + panic("not implemented") +} + +func isInterruptError(err error) bool { + panic("not implemented") +} + +func terminationSignal(err error) (os.Signal, bool) { + panic("not implemented") +} + +func isCrashSignal(signal os.Signal) bool { + panic("not implemented") +} diff --git a/testing/internal/fuzz/sys_windows.go b/testing/internal/fuzz/sys_windows.go new file mode 100644 index 0000000..82c9703 --- /dev/null +++ b/testing/internal/fuzz/sys_windows.go @@ -0,0 +1,144 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fuzz + +import ( + "fmt" + "os" + "os/exec" + "syscall" + "unsafe" +) + +type sharedMemSys struct { + mapObj syscall.Handle +} + +func sharedMemMapFile(f *os.File, size int, removeOnClose bool) (mem *sharedMem, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("mapping temporary file %s: %w", f.Name(), err) + } + }() + + // Create a file mapping object. The object itself is not shared. + mapObj, err := syscall.CreateFileMapping( + syscall.Handle(f.Fd()), // fhandle + nil, // sa + syscall.PAGE_READWRITE, // prot + 0, // maxSizeHigh + 0, // maxSizeLow + nil, // name + ) + if err != nil { + return nil, err + } + + // Create a view from the file mapping object. + access := uint32(syscall.FILE_MAP_READ | syscall.FILE_MAP_WRITE) + addr, err := syscall.MapViewOfFile( + mapObj, // handle + access, // access + 0, // offsetHigh + 0, // offsetLow + uintptr(size), // length + ) + if err != nil { + syscall.CloseHandle(mapObj) + return nil, err + } + + region := unsafe.Slice((*byte)(unsafe.Pointer(addr)), size) + return &sharedMem{ + f: f, + region: region, + removeOnClose: removeOnClose, + sys: sharedMemSys{mapObj: mapObj}, + }, nil +} + +// Close unmaps the shared memory and closes the temporary file. If this +// sharedMem was created with sharedMemTempFile, Close also removes the file. +func (m *sharedMem) Close() error { + // Attempt all operations, even if we get an error for an earlier operation. + // os.File.Close may fail due to I/O errors, but we still want to delete + // the temporary file. + var errs []error + errs = append(errs, + syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&m.region[0]))), + syscall.CloseHandle(m.sys.mapObj), + m.f.Close()) + if m.removeOnClose { + errs = append(errs, os.Remove(m.f.Name())) + } + for _, err := range errs { + if err != nil { + return err + } + } + return nil +} + +// setWorkerComm configures communication channels on the cmd that will +// run a worker process. +func setWorkerComm(cmd *exec.Cmd, comm workerComm) { + mem := <-comm.memMu + memFD := mem.f.Fd() + comm.memMu <- mem + syscall.SetHandleInformation(syscall.Handle(comm.fuzzIn.Fd()), syscall.HANDLE_FLAG_INHERIT, 1) + syscall.SetHandleInformation(syscall.Handle(comm.fuzzOut.Fd()), syscall.HANDLE_FLAG_INHERIT, 1) + syscall.SetHandleInformation(syscall.Handle(memFD), syscall.HANDLE_FLAG_INHERIT, 1) + cmd.Env = append(cmd.Env, fmt.Sprintf("GO_TEST_FUZZ_WORKER_HANDLES=%x,%x,%x", comm.fuzzIn.Fd(), comm.fuzzOut.Fd(), memFD)) + cmd.SysProcAttr = &syscall.SysProcAttr{AdditionalInheritedHandles: []syscall.Handle{syscall.Handle(comm.fuzzIn.Fd()), syscall.Handle(comm.fuzzOut.Fd()), syscall.Handle(memFD)}} +} + +// getWorkerComm returns communication channels in the worker process. 
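+// Unlike the POSIX implementation, which passes the descriptors positionally
+// via ExtraFiles, the Windows worker receives its three handles as hex values
+// in the GO_TEST_FUZZ_WORKER_HANDLES environment variable set by
+// setWorkerComm.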
+func getWorkerComm() (comm workerComm, err error) { + v := os.Getenv("GO_TEST_FUZZ_WORKER_HANDLES") + if v == "" { + return workerComm{}, fmt.Errorf("GO_TEST_FUZZ_WORKER_HANDLES not set") + } + var fuzzInFD, fuzzOutFD, memFileFD uintptr + if _, err := fmt.Sscanf(v, "%x,%x,%x", &fuzzInFD, &fuzzOutFD, &memFileFD); err != nil { + return workerComm{}, fmt.Errorf("parsing GO_TEST_FUZZ_WORKER_HANDLES=%s: %v", v, err) + } + + fuzzIn := os.NewFile(fuzzInFD, "fuzz_in") + fuzzOut := os.NewFile(fuzzOutFD, "fuzz_out") + memFile := os.NewFile(memFileFD, "fuzz_mem") + fi, err := memFile.Stat() + if err != nil { + return workerComm{}, fmt.Errorf("worker checking temp file size: %w", err) + } + size := int(fi.Size()) + if int64(size) != fi.Size() { + return workerComm{}, fmt.Errorf("fuzz temp file exceeds maximum size") + } + removeOnClose := false + mem, err := sharedMemMapFile(memFile, size, removeOnClose) + if err != nil { + return workerComm{}, err + } + memMu := make(chan *sharedMem, 1) + memMu <- mem + + return workerComm{fuzzIn: fuzzIn, fuzzOut: fuzzOut, memMu: memMu}, nil +} + +func isInterruptError(err error) bool { + // On Windows, we can't tell whether the process was interrupted by the error + // returned by Wait. It looks like an ExitError with status 1. + return false +} + +// terminationSignal returns -1 and false because Windows doesn't have signals. +func terminationSignal(err error) (os.Signal, bool) { + return syscall.Signal(-1), false +} + +// isCrashSignal is not implemented because Windows doesn't have signals. +func isCrashSignal(signal os.Signal) bool { + panic("not implemented: no signals on windows") +} diff --git a/testing/internal/fuzz/trace.go b/testing/internal/fuzz/trace.go new file mode 100644 index 0000000..a15c370 --- /dev/null +++ b/testing/internal/fuzz/trace.go @@ -0,0 +1,35 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !libfuzzer + +package fuzz + +import _ "unsafe" // for go:linkname + +//go:linkname libfuzzerTraceCmp1 runtime.libfuzzerTraceCmp1 +//go:linkname libfuzzerTraceCmp2 runtime.libfuzzerTraceCmp2 +//go:linkname libfuzzerTraceCmp4 runtime.libfuzzerTraceCmp4 +//go:linkname libfuzzerTraceCmp8 runtime.libfuzzerTraceCmp8 + +//go:linkname libfuzzerTraceConstCmp1 runtime.libfuzzerTraceConstCmp1 +//go:linkname libfuzzerTraceConstCmp2 runtime.libfuzzerTraceConstCmp2 +//go:linkname libfuzzerTraceConstCmp4 runtime.libfuzzerTraceConstCmp4 +//go:linkname libfuzzerTraceConstCmp8 runtime.libfuzzerTraceConstCmp8 + +//go:linkname libfuzzerHookStrCmp runtime.libfuzzerHookStrCmp +//go:linkname libfuzzerHookEqualFold runtime.libfuzzerHookEqualFold + +func libfuzzerTraceCmp1(arg0, arg1 uint8, fakePC uint) {} +func libfuzzerTraceCmp2(arg0, arg1 uint16, fakePC uint) {} +func libfuzzerTraceCmp4(arg0, arg1 uint32, fakePC uint) {} +func libfuzzerTraceCmp8(arg0, arg1 uint64, fakePC uint) {} + +func libfuzzerTraceConstCmp1(arg0, arg1 uint8, fakePC uint) {} +func libfuzzerTraceConstCmp2(arg0, arg1 uint16, fakePC uint) {} +func libfuzzerTraceConstCmp4(arg0, arg1 uint32, fakePC uint) {} +func libfuzzerTraceConstCmp8(arg0, arg1 uint64, fakePC uint) {} + +func libfuzzerHookStrCmp(arg0, arg1 string, fakePC uint) {} +func libfuzzerHookEqualFold(arg0, arg1 string, fakePC uint) {} diff --git a/testing/internal/fuzz/worker.go b/testing/internal/fuzz/worker.go new file mode 100644 index 0000000..9ee2f27 --- /dev/null +++ b/testing/internal/fuzz/worker.go @@ -0,0 +1,1195 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "os/exec" + "reflect" + "runtime" + "sync" + "time" +) + +const ( + // workerFuzzDuration is the amount of time a worker can spend testing random + // variations of an input given by the coordinator. + workerFuzzDuration = 100 * time.Millisecond + + // workerTimeoutDuration is the amount of time a worker can go without + // responding to the coordinator before being stopped. + workerTimeoutDuration = 1 * time.Second + + // workerExitCode is used as an exit code by fuzz worker processes after an internal error. + // This distinguishes internal errors from uncontrolled panics and other crashes. + // Keep in sync with internal/fuzz.workerExitCode. + workerExitCode = 70 + + // workerSharedMemSize is the maximum size of the shared memory file used to + // communicate with workers. This limits the size of fuzz inputs. + workerSharedMemSize = 100 << 20 // 100 MB +) + +// worker manages a worker process running a test binary. The worker object +// exists only in the coordinator (the process started by 'go test -fuzz'). +// workerClient is used by the coordinator to send RPCs to the worker process, +// which handles them with workerServer. +type worker struct { + dir string // working directory, same as package directory + binPath string // path to test executable + args []string // arguments for test executable + env []string // environment for test executable + + coordinator *coordinator + + memMu chan *sharedMem // mutex guarding shared memory with worker; persists across processes. + + cmd *exec.Cmd // current worker process + client *workerClient // used to communicate with worker process + waitErr error // last error returned by wait, set before termC is closed. 
+ interrupted bool // true after stop interrupts a running worker. + termC chan struct{} // closed by wait when worker process terminates +} + +func newWorker(c *coordinator, dir, binPath string, args, env []string) (*worker, error) { + mem, err := sharedMemTempFile(workerSharedMemSize) + if err != nil { + return nil, err + } + memMu := make(chan *sharedMem, 1) + memMu <- mem + return &worker{ + dir: dir, + binPath: binPath, + args: args, + env: env[:len(env):len(env)], // copy on append to ensure workers don't overwrite each other. + coordinator: c, + memMu: memMu, + }, nil +} + +// cleanup releases persistent resources associated with the worker. +func (w *worker) cleanup() error { + mem := <-w.memMu + if mem == nil { + return nil + } + close(w.memMu) + return mem.Close() +} + +// coordinate runs the test binary to perform fuzzing. +// +// coordinate loops until ctx is canceled or a fatal error is encountered. +// If a test process terminates unexpectedly while fuzzing, coordinate will +// attempt to restart and continue unless the termination can be attributed +// to an interruption (from a timer or the user). +// +// While looping, coordinate receives inputs from the coordinator, passes +// those inputs to the worker process, then passes the results back to +// the coordinator. +func (w *worker) coordinate(ctx context.Context) error { + // Main event loop. + for { + // Start or restart the worker if it's not running. + if !w.isRunning() { + if err := w.startAndPing(ctx); err != nil { + return err + } + } + + select { + case <-ctx.Done(): + // Worker was told to stop. + err := w.stop() + if err != nil && !w.interrupted && !isInterruptError(err) { + return err + } + return ctx.Err() + + case <-w.termC: + // Worker process terminated unexpectedly while waiting for input. + err := w.stop() + if w.interrupted { + panic("worker interrupted after unexpected termination") + } + if err == nil || isInterruptError(err) { + // Worker stopped, either by exiting with status 0 or after being + // interrupted with a signal that was not sent by the coordinator. + // + // When the user presses ^C, on POSIX platforms, SIGINT is delivered to + // all processes in the group concurrently, and the worker may see it + // before the coordinator. The worker should exit 0 gracefully (in + // theory). + // + // This condition is probably intended by the user, so suppress + // the error. + return nil + } + if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == workerExitCode { + // Worker exited with a code indicating F.Fuzz was not called correctly, + // for example, F.Fail was called first. + return fmt.Errorf("fuzzing process exited unexpectedly due to an internal failure: %w", err) + } + // Worker exited non-zero or was terminated by a non-interrupt + // signal (for example, SIGSEGV) while fuzzing. + return fmt.Errorf("fuzzing process hung or terminated unexpectedly: %w", err) + // TODO(jayconrod,katiehockman): if -keepfuzzing, restart worker. + + case input := <-w.coordinator.inputC: + // Received input from coordinator. + args := fuzzArgs{ + Limit: input.limit, + Timeout: input.timeout, + Warmup: input.warmup, + CoverageData: input.coverageData, + } + entry, resp, isInternalError, err := w.client.fuzz(ctx, input.entry, args) + canMinimize := true + if err != nil { + // Error communicating with worker. + w.stop() + if ctx.Err() != nil { + // Timeout or interruption. + return ctx.Err() + } + if w.interrupted { + // Communication error before we stopped the worker. 
+ // Report an error, but don't record a crasher. + return fmt.Errorf("communicating with fuzzing process: %v", err) + } + if sig, ok := terminationSignal(w.waitErr); ok && !isCrashSignal(sig) { + // Worker terminated by a signal that probably wasn't caused by a + // specific input to the fuzz function. For example, on Linux, + // the kernel (OOM killer) may send SIGKILL to a process using a lot + // of memory. Or the shell might send SIGHUP when the terminal + // is closed. Don't record a crasher. + return fmt.Errorf("fuzzing process terminated by unexpected signal; no crash will be recorded: %v", w.waitErr) + } + if isInternalError { + // An internal error occurred which shouldn't be considered + // a crash. + return err + } + // Unexpected termination. Set error message and fall through. + // We'll restart the worker on the next iteration. + // Don't attempt to minimize this since it crashed the worker. + resp.Err = fmt.Sprintf("fuzzing process hung or terminated unexpectedly: %v", w.waitErr) + canMinimize = false + } + result := fuzzResult{ + limit: input.limit, + count: resp.Count, + totalDuration: resp.TotalDuration, + entryDuration: resp.InterestingDuration, + entry: entry, + crasherMsg: resp.Err, + coverageData: resp.CoverageData, + canMinimize: canMinimize, + } + w.coordinator.resultC <- result + + case input := <-w.coordinator.minimizeC: + // Received input to minimize from coordinator. + result, err := w.minimize(ctx, input) + if err != nil { + // Error minimizing. Send back the original input. If it didn't cause + // an error before, report it as causing an error now. + // TODO: double-check this is handled correctly when + // implementing -keepfuzzing. + result = fuzzResult{ + entry: input.entry, + crasherMsg: input.crasherMsg, + canMinimize: false, + limit: input.limit, + } + if result.crasherMsg == "" { + result.crasherMsg = err.Error() + } + } + if shouldPrintDebugInfo() { + w.coordinator.debugLogf( + "input minimized, id: %s, original id: %s, crasher: %t, originally crasher: %t, minimizing took: %s", + result.entry.Path, + input.entry.Path, + result.crasherMsg != "", + input.crasherMsg != "", + result.totalDuration, + ) + } + w.coordinator.resultC <- result + } + } +} + +// minimize tells a worker process to attempt to find a smaller value that +// either causes an error (if we started minimizing because we found an input +// that causes an error) or preserves new coverage (if we started minimizing +// because we found an input that expands coverage). +func (w *worker) minimize(ctx context.Context, input fuzzMinimizeInput) (min fuzzResult, err error) { + if w.coordinator.opts.MinimizeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, w.coordinator.opts.MinimizeTimeout) + defer cancel() + } + + args := minimizeArgs{ + Limit: input.limit, + Timeout: input.timeout, + KeepCoverage: input.keepCoverage, + } + entry, resp, err := w.client.minimize(ctx, input.entry, args) + if err != nil { + // Error communicating with worker. + w.stop() + if ctx.Err() != nil || w.interrupted || isInterruptError(w.waitErr) { + // Worker was interrupted, possibly by the user pressing ^C. + // Normally, workers can handle interrupts and timeouts gracefully and + // will return without error. An error here indicates the worker + // may not have been in a good state, but the error won't be meaningful + // to the user. Just return the original crasher without logging anything. 
+ return fuzzResult{ + entry: input.entry, + crasherMsg: input.crasherMsg, + coverageData: input.keepCoverage, + canMinimize: false, + limit: input.limit, + }, nil + } + return fuzzResult{ + entry: entry, + crasherMsg: fmt.Sprintf("fuzzing process hung or terminated unexpectedly while minimizing: %v", err), + canMinimize: false, + limit: input.limit, + count: resp.Count, + totalDuration: resp.Duration, + }, nil + } + + if input.crasherMsg != "" && resp.Err == "" { + return fuzzResult{}, fmt.Errorf("attempted to minimize a crash but could not reproduce") + } + + return fuzzResult{ + entry: entry, + crasherMsg: resp.Err, + coverageData: resp.CoverageData, + canMinimize: false, + limit: input.limit, + count: resp.Count, + totalDuration: resp.Duration, + }, nil +} + +func (w *worker) isRunning() bool { + return w.cmd != nil +} + +// startAndPing starts the worker process and sends it a message to make sure it +// can communicate. +// +// startAndPing returns an error if any part of this didn't work, including if +// the context is expired or the worker process was interrupted before it +// responded. Errors that happen after start but before the ping response +// likely indicate that the worker did not call F.Fuzz or called F.Fail first. +// We don't record crashers for these errors. +func (w *worker) startAndPing(ctx context.Context) error { + if ctx.Err() != nil { + return ctx.Err() + } + if err := w.start(); err != nil { + return err + } + if err := w.client.ping(ctx); err != nil { + w.stop() + if ctx.Err() != nil { + return ctx.Err() + } + if isInterruptError(err) { + // User may have pressed ^C before worker responded. + return err + } + // TODO: record and return stderr. + return fmt.Errorf("fuzzing process terminated without fuzzing: %w", err) + } + return nil +} + +// start runs a new worker process. +// +// If the process couldn't be started, start returns an error. Start won't +// return later termination errors from the process if they occur. +// +// If the process starts successfully, start returns nil. stop must be called +// once later to clean up, even if the process terminates on its own. +// +// When the process terminates, w.waitErr is set to the error (if any), and +// w.termC is closed. +func (w *worker) start() (err error) { + if w.isRunning() { + panic("worker already started") + } + w.waitErr = nil + w.interrupted = false + w.termC = nil + + cmd := exec.Command(w.binPath, w.args...) + cmd.Dir = w.dir + cmd.Env = w.env[:len(w.env):len(w.env)] // copy on append to ensure workers don't overwrite each other. + + // Create the "fuzz_in" and "fuzz_out" pipes so we can communicate with + // the worker. We don't use stdin and stdout, since the test binary may + // do something else with those. + // + // Each pipe has a reader and a writer. The coordinator writes to fuzzInW + // and reads from fuzzOutR. The worker inherits fuzzInR and fuzzOutW. + // The coordinator closes fuzzInR and fuzzOutW after starting the worker, + // since we have no further need of them. + fuzzInR, fuzzInW, err := os.Pipe() + if err != nil { + return err + } + defer fuzzInR.Close() + fuzzOutR, fuzzOutW, err := os.Pipe() + if err != nil { + fuzzInW.Close() + return err + } + defer fuzzOutW.Close() + setWorkerComm(cmd, workerComm{fuzzIn: fuzzInR, fuzzOut: fuzzOutW, memMu: w.memMu}) + + // Start the worker process. + if err := cmd.Start(); err != nil { + fuzzInW.Close() + fuzzOutR.Close() + return err + } + + // Worker started successfully. 
+ // After this, w.client owns fuzzInW and fuzzOutR, so w.client.Close must be + // called later by stop. + w.cmd = cmd + w.termC = make(chan struct{}) + comm := workerComm{fuzzIn: fuzzInW, fuzzOut: fuzzOutR, memMu: w.memMu} + m := newMutator() + w.client = newWorkerClient(comm, m) + + go func() { + w.waitErr = w.cmd.Wait() + close(w.termC) + }() + + return nil +} + +// stop tells the worker process to exit by closing w.client, then blocks until +// it terminates. If the worker doesn't terminate after a short time, stop +// signals it with os.Interrupt (where supported), then os.Kill. +// +// stop returns the error the process terminated with, if any (same as +// w.waitErr). +// +// stop must be called at least once after start returns successfully, even if +// the worker process terminates unexpectedly. +func (w *worker) stop() error { + if w.termC == nil { + panic("worker was not started successfully") + } + select { + case <-w.termC: + // Worker already terminated. + if w.client == nil { + // stop already called. + return w.waitErr + } + // Possible unexpected termination. + w.client.Close() + w.cmd = nil + w.client = nil + return w.waitErr + default: + // Worker still running. + } + + // Tell the worker to stop by closing fuzz_in. It won't actually stop until it + // finishes with earlier calls. + closeC := make(chan struct{}) + go func() { + w.client.Close() + close(closeC) + }() + + sig := os.Interrupt + if runtime.GOOS == "windows" { + // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on + // Windows; using it with os.Process.Signal will return an error.” + // Fall back to Kill instead. + sig = os.Kill + } + + t := time.NewTimer(workerTimeoutDuration) + for { + select { + case <-w.termC: + // Worker terminated. + t.Stop() + <-closeC + w.cmd = nil + w.client = nil + return w.waitErr + + case <-t.C: + // Timer fired before worker terminated. + w.interrupted = true + switch sig { + case os.Interrupt: + // Try to stop the worker with SIGINT and wait a little longer. + w.cmd.Process.Signal(sig) + sig = os.Kill + t.Reset(workerTimeoutDuration) + + case os.Kill: + // Try to stop the worker with SIGKILL and keep waiting. + w.cmd.Process.Signal(sig) + sig = nil + t.Reset(workerTimeoutDuration) + + case nil: + // Still waiting. Print a message to let the user know why. + fmt.Fprintf(w.coordinator.opts.Log, "waiting for fuzzing process to terminate...\n") + } + } + } +} + +// RunFuzzWorker is called in a worker process to communicate with the +// coordinator process in order to fuzz random inputs. RunFuzzWorker loops +// until the coordinator tells it to stop. +// +// fn is a wrapper on the fuzz function. It may return an error to indicate +// a given input "crashed". The coordinator will also record a crasher if +// the function times out or terminates the process. +// +// RunFuzzWorker returns an error if it could not communicate with the +// coordinator process. +func RunFuzzWorker(ctx context.Context, fn func(CorpusEntry) error) error { + comm, err := getWorkerComm() + if err != nil { + return err + } + srv := &workerServer{ + workerComm: comm, + fuzzFn: func(e CorpusEntry) (time.Duration, error) { + timer := time.AfterFunc(10*time.Second, func() { + panic("deadlocked!") // this error message won't be printed + }) + defer timer.Stop() + start := time.Now() + err := fn(e) + return time.Since(start), err + }, + m: newMutator(), + } + return srv.serve(ctx) +} + +// call is serialized and sent from the coordinator on fuzz_in. It acts as +// a minimalist RPC mechanism. 
Exactly one of its fields must be set to indicate
+// which method to call.
+type call struct {
+	Ping     *pingArgs
+	Fuzz     *fuzzArgs
+	Minimize *minimizeArgs
+}
+
+// minimizeArgs contains arguments to workerServer.minimize. The value to
+// minimize is already in shared memory.
+type minimizeArgs struct {
+	// Timeout is the time to spend minimizing. This may include time to start up,
+	// especially if the input causes the worker process to terminate, requiring
+	// repeated restarts.
+	Timeout time.Duration
+
+	// Limit is the maximum number of values to test, without spending more time
+	// than Timeout. 0 indicates no limit.
+	Limit int64
+
+	// KeepCoverage is a set of coverage counters the worker should attempt to
+	// keep in minimized values. When provided, the worker will reject inputs that
+	// don't cause at least one of these bits to be set.
+	KeepCoverage []byte
+
+	// Index is the index of the fuzz target parameter to be minimized.
+	Index int
+}
+
+// minimizeResponse contains results from workerServer.minimize.
+type minimizeResponse struct {
+	// WroteToMem is true if the worker found a smaller input and wrote it to
+	// shared memory. If minimizeArgs.KeepCoverage was set, the minimized input
+	// preserved at least one coverage bit and did not cause an error.
+	// Otherwise, the minimized input caused some error, recorded in Err.
+	WroteToMem bool
+
+	// Err is the error string caused by the value in shared memory, if any.
+	Err string
+
+	// CoverageData is the set of coverage bits activated by the minimized value
+	// in shared memory. When set, it contains at least one bit from KeepCoverage.
+	// CoverageData will be nil if Err is set or if minimization failed.
+	CoverageData []byte
+
+	// Duration is the time spent minimizing, not including starting or cleaning up.
+	Duration time.Duration
+
+	// Count is the number of values tested.
+	Count int64
+}
+
+// fuzzArgs contains arguments to workerServer.fuzz. The value to fuzz is
+// passed in shared memory.
+type fuzzArgs struct {
+	// Timeout is the time to spend fuzzing, not including starting or
+	// cleaning up.
+	Timeout time.Duration
+
+	// Limit is the maximum number of values to test, without spending more time
+	// than Timeout. 0 indicates no limit.
+	Limit int64
+
+	// Warmup indicates whether this is part of a warmup run, meaning that
+	// fuzzing should not occur. If coverageEnabled is true, then coverage data
+	// should be reported.
+	Warmup bool
+
+	// CoverageData is the coverage data. If set, the worker should update its
+	// local coverage data prior to fuzzing.
+	CoverageData []byte
+}
+
+// fuzzResponse contains results from workerServer.fuzz.
+type fuzzResponse struct {
+	// TotalDuration is the time spent fuzzing, not including starting or
+	// cleaning up. InterestingDuration is the time spent running the input
+	// that expanded coverage, if any.
+	TotalDuration       time.Duration
+	InterestingDuration time.Duration
+
+	// Count is the number of values tested.
+	Count int64
+
+	// CoverageData is set if the value in shared memory expands coverage
+	// and therefore may be interesting to the coordinator.
+	CoverageData []byte
+
+	// Err is the error string caused by the value in shared memory, which is
+	// non-empty if the value in shared memory caused a crash.
+	Err string
+
+	// InternalErr is the error string caused by an internal error in the
+	// worker. This shouldn't be considered a crasher.
+	InternalErr string
+}
+
+// pingArgs contains arguments to workerServer.ping.
+type pingArgs struct{}
+
+// pingResponse contains results from workerServer.ping.
+type pingResponse struct{}
+
+// workerComm holds pipes and shared memory used for communication
+// between the coordinator process (client) and a worker process (server).
+// These values are unique to each worker; they are shared only with the
+// coordinator, not with other workers.
+//
+// Access to shared memory is synchronized implicitly over the RPC protocol
+// implemented in workerServer and workerClient. During a call, the server
+// (worker) has exclusive access to shared memory; at other times, the client
+// (coordinator) has exclusive access.
+type workerComm struct {
+	fuzzIn, fuzzOut *os.File
+	memMu           chan *sharedMem // mutex guarding shared memory
+}
+
+// workerServer is a minimalist RPC server, run by fuzz worker processes.
+// It allows the coordinator process (using workerClient) to call methods in a
+// worker process. This system allows the coordinator to run multiple worker
+// processes in parallel and to collect inputs that caused crashes from shared
+// memory after a worker process terminates unexpectedly.
+type workerServer struct {
+	workerComm
+	m *mutator
+
+	// coverageMask is the local coverage data for the worker. It is
+	// periodically updated to reflect the data in the coordinator when new
+	// coverage is found.
+	coverageMask []byte
+
+	// fuzzFn runs the worker's fuzz target on the given input and returns an
+	// error if it finds a crasher (the process may also exit or crash), and the
+	// time it took to run the input. It sets a deadline of 10 seconds, at which
+	// point it will panic with the assumption that the process is hanging or
+	// deadlocked.
+	fuzzFn func(CorpusEntry) (time.Duration, error)
+}
+
+// serve reads serialized RPC messages on fuzzIn. When serve receives a message,
+// it calls the corresponding method, then sends the serialized result back
+// on fuzzOut.
+//
+// serve handles RPC calls synchronously; it will not attempt to read a message
+// until the previous call has finished.
+//
+// serve returns errors that occurred when communicating over pipes. serve
+// does not return errors from method calls; those are passed through serialized
+// responses.
+func (ws *workerServer) serve(ctx context.Context) error {
+	enc := json.NewEncoder(ws.fuzzOut)
+	dec := json.NewDecoder(&contextReader{ctx: ctx, r: ws.fuzzIn})
+	for {
+		var c call
+		if err := dec.Decode(&c); err != nil {
+			if err == io.EOF || err == ctx.Err() {
+				return nil
+			} else {
+				return err
+			}
+		}
+
+		var resp any
+		switch {
+		case c.Fuzz != nil:
+			resp = ws.fuzz(ctx, *c.Fuzz)
+		case c.Minimize != nil:
+			resp = ws.minimize(ctx, *c.Minimize)
+		case c.Ping != nil:
+			resp = ws.ping(ctx, *c.Ping)
+		default:
+			return errors.New("no arguments provided for any call")
+		}
+
+		if err := enc.Encode(resp); err != nil {
+			return err
+		}
+	}
+}
+
+// chainedMutations is how many mutations are applied before the worker
+// resets the input to its original state.
+// NOTE: this number was picked without much thought. It is low enough that
+// it seems to create a significant diversity in mutated inputs. We may want
+// to consider looking into this more closely once we have a proper performance
+// testing framework. Another option is to randomly pick the number of chained
+// mutations on each invocation of the workerServer.fuzz method (this appears to
+// be what libFuzzer does, although there seems to be no documentation which
+// explains why this choice was made.)
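+//
+// For example, with chainedMutations = 5, a crash on the worker's 7th
+// execution (count = 7) is reconstructed by restoring the saved PRNG state
+// and replaying ((7-1) % 5) + 1 = 2 mutations on the original value; see the
+// numMutations computation in workerClient.fuzz.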
+const chainedMutations = 5 + +// fuzz runs the test function on random variations of the input value in shared +// memory for a limited duration or number of iterations. +// +// fuzz returns early if it finds an input that crashes the fuzz function (with +// fuzzResponse.Err set) or an input that expands coverage (with +// fuzzResponse.InterestingDuration set). +// +// fuzz does not modify the input in shared memory. Instead, it saves the +// initial PRNG state in shared memory and increments a counter in shared +// memory before each call to the test function. The caller may reconstruct +// the crashing input with this information, since the PRNG is deterministic. +func (ws *workerServer) fuzz(ctx context.Context, args fuzzArgs) (resp fuzzResponse) { + if args.CoverageData != nil { + if ws.coverageMask != nil && len(args.CoverageData) != len(ws.coverageMask) { + resp.InternalErr = fmt.Sprintf("unexpected size for CoverageData: got %d, expected %d", len(args.CoverageData), len(ws.coverageMask)) + return resp + } + ws.coverageMask = args.CoverageData + } + start := time.Now() + defer func() { resp.TotalDuration = time.Since(start) }() + + if args.Timeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, args.Timeout) + defer cancel() + } + mem := <-ws.memMu + ws.m.r.save(&mem.header().randState, &mem.header().randInc) + defer func() { + resp.Count = mem.header().count + ws.memMu <- mem + }() + if args.Limit > 0 && mem.header().count >= args.Limit { + resp.InternalErr = fmt.Sprintf("mem.header().count %d already exceeds args.Limit %d", mem.header().count, args.Limit) + return resp + } + + originalVals, err := unmarshalCorpusFile(mem.valueCopy()) + if err != nil { + resp.InternalErr = err.Error() + return resp + } + vals := make([]any, len(originalVals)) + copy(vals, originalVals) + + shouldStop := func() bool { + return args.Limit > 0 && mem.header().count >= args.Limit + } + fuzzOnce := func(entry CorpusEntry) (dur time.Duration, cov []byte, errMsg string) { + mem.header().count++ + var err error + dur, err = ws.fuzzFn(entry) + if err != nil { + errMsg = err.Error() + if errMsg == "" { + errMsg = "fuzz function failed with no input" + } + return dur, nil, errMsg + } + if ws.coverageMask != nil && countNewCoverageBits(ws.coverageMask, coverageSnapshot) > 0 { + return dur, coverageSnapshot, "" + } + return dur, nil, "" + } + + if args.Warmup { + dur, _, errMsg := fuzzOnce(CorpusEntry{Values: vals}) + if errMsg != "" { + resp.Err = errMsg + return resp + } + resp.InterestingDuration = dur + if coverageEnabled { + resp.CoverageData = coverageSnapshot + } + return resp + } + + for { + select { + case <-ctx.Done(): + return resp + default: + if mem.header().count%chainedMutations == 0 { + copy(vals, originalVals) + ws.m.r.save(&mem.header().randState, &mem.header().randInc) + } + ws.m.mutate(vals, cap(mem.valueRef())) + + entry := CorpusEntry{Values: vals} + dur, cov, errMsg := fuzzOnce(entry) + if errMsg != "" { + resp.Err = errMsg + return resp + } + if cov != nil { + resp.CoverageData = cov + resp.InterestingDuration = dur + return resp + } + if shouldStop() { + return resp + } + } + } +} + +func (ws *workerServer) minimize(ctx context.Context, args minimizeArgs) (resp minimizeResponse) { + start := time.Now() + defer func() { resp.Duration = time.Since(start) }() + mem := <-ws.memMu + defer func() { ws.memMu <- mem }() + vals, err := unmarshalCorpusFile(mem.valueCopy()) + if err != nil { + panic(err) + } + inpHash := sha256.Sum256(mem.valueCopy()) + if args.Timeout != 0 { + 
		var cancel func()
+		ctx, cancel = context.WithTimeout(ctx, args.Timeout)
+		defer cancel()
+	}
+
+	// Minimize the values in vals, then write to shared memory. We only write
+	// to shared memory after completing minimization.
+	success, err := ws.minimizeInput(ctx, vals, mem, args)
+	if success {
+		writeToMem(vals, mem)
+		outHash := sha256.Sum256(mem.valueCopy())
+		mem.header().rawInMem = false
+		resp.WroteToMem = true
+		if err != nil {
+			resp.Err = err.Error()
+		} else {
+			// If the values didn't change during minimization then coverageSnapshot is likely
+			// a dirty snapshot which represents the very last step of minimization, not the
+			// coverage for the initial input. In that case just return the coverage we were
+			// given initially, since it more accurately represents the coverage map for the
+			// input we are returning.
+			if outHash != inpHash {
+				resp.CoverageData = coverageSnapshot
+			} else {
+				resp.CoverageData = args.KeepCoverage
+			}
+		}
+	}
+	return resp
+}
+
+// minimizeInput applies a series of minimizing transformations on the provided
+// vals, ensuring that each minimization still causes an error, or keeps
+// coverage, in fuzzFn. It uses the context to determine how long to run,
+// stopping once closed. It returns a bool indicating whether minimization was
+// successful and an error if one was found.
+func (ws *workerServer) minimizeInput(ctx context.Context, vals []any, mem *sharedMem, args minimizeArgs) (success bool, retErr error) {
+	keepCoverage := args.KeepCoverage
+	memBytes := mem.valueRef()
+	bPtr := &memBytes
+	count := &mem.header().count
+	shouldStop := func() bool {
+		return ctx.Err() != nil ||
+			(args.Limit > 0 && *count >= args.Limit)
+	}
+	if shouldStop() {
+		return false, nil
+	}
+
+	// Check that the original value preserves coverage or causes an error.
+	// If not, then whatever caused us to think the value was interesting may
+	// have been a flake, and we can't minimize it.
+	*count++
+	_, retErr = ws.fuzzFn(CorpusEntry{Values: vals})
+	if keepCoverage != nil {
+		if !hasCoverageBit(keepCoverage, coverageSnapshot) || retErr != nil {
+			return false, nil
+		}
+	} else if retErr == nil {
+		return false, nil
+	}
+	mem.header().rawInMem = true
+
+	// tryMinimized runs the fuzz function with candidate replacing the value
+	// at index args.Index. tryMinimized returns whether the input with
+	// candidate is interesting for the same reason as the original input: it
+	// causes an error if one was expected, or it preserves coverage.
+	tryMinimized := func(candidate []byte) bool {
+		prev := vals[args.Index]
+		switch prev.(type) {
+		case []byte:
+			vals[args.Index] = candidate
+		case string:
+			vals[args.Index] = string(candidate)
+		default:
+			panic("impossible")
+		}
+		copy(*bPtr, candidate)
+		*bPtr = (*bPtr)[:len(candidate)]
+		mem.setValueLen(len(candidate))
+		*count++
+		_, err := ws.fuzzFn(CorpusEntry{Values: vals})
+		if err != nil {
+			retErr = err
+			if keepCoverage != nil {
+				// Now that we've found a crash, that's more important than any
+				// minimization of interesting inputs that was being done. Clear out
+				// keepCoverage to only minimize the crash going forward.
+				keepCoverage = nil
+			}
+			return true
+		}
+		// Minimization should preserve coverage bits.
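+		// "Preserve" here means every counter in keepCoverage must still be
+		// set after running the candidate: a smaller input that loses any of
+		// the required counters is rejected and the previous value restored.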
+ if keepCoverage != nil && isCoverageSubset(keepCoverage, coverageSnapshot) { + return true + } + vals[args.Index] = prev + return false + } + switch v := vals[args.Index].(type) { + case string: + minimizeBytes([]byte(v), tryMinimized, shouldStop) + case []byte: + minimizeBytes(v, tryMinimized, shouldStop) + default: + panic("impossible") + } + return true, retErr +} + +func writeToMem(vals []any, mem *sharedMem) { + b := marshalCorpusFile(vals...) + mem.setValue(b) +} + +// ping does nothing. The coordinator calls this method to ensure the worker +// has called F.Fuzz and can communicate. +func (ws *workerServer) ping(ctx context.Context, args pingArgs) pingResponse { + return pingResponse{} +} + +// workerClient is a minimalist RPC client. The coordinator process uses a +// workerClient to call methods in each worker process (handled by +// workerServer). +type workerClient struct { + workerComm + m *mutator + + // mu is the mutex protecting the workerComm.fuzzIn pipe. This must be + // locked before making calls to the workerServer. It prevents + // workerClient.Close from closing fuzzIn while workerClient methods are + // writing to it concurrently, and prevents multiple callers from writing to + // fuzzIn concurrently. + mu sync.Mutex +} + +func newWorkerClient(comm workerComm, m *mutator) *workerClient { + return &workerClient{workerComm: comm, m: m} +} + +// Close shuts down the connection to the RPC server (the worker process) by +// closing fuzz_in. Close drains fuzz_out (avoiding a SIGPIPE in the worker), +// and closes it after the worker process closes the other end. +func (wc *workerClient) Close() error { + wc.mu.Lock() + defer wc.mu.Unlock() + + // Close fuzzIn. This signals to the server that there are no more calls, + // and it should exit. + if err := wc.fuzzIn.Close(); err != nil { + wc.fuzzOut.Close() + return err + } + + // Drain fuzzOut and close it. When the server exits, the kernel will close + // its end of fuzzOut, and we'll get EOF. + if _, err := io.Copy(io.Discard, wc.fuzzOut); err != nil { + wc.fuzzOut.Close() + return err + } + return wc.fuzzOut.Close() +} + +// errSharedMemClosed is returned by workerClient methods that cannot access +// shared memory because it was closed and unmapped by another goroutine. That +// can happen when worker.cleanup is called in the worker goroutine while a +// workerClient.fuzz call runs concurrently. +// +// This error should not be reported. It indicates the operation was +// interrupted. +var errSharedMemClosed = errors.New("internal error: shared memory was closed and unmapped") + +// minimize tells the worker to call the minimize method. See +// workerServer.minimize. 
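+//
+// Each minimizeArgs call asks the worker to minimize a single parameter
+// (args.Index), so this method loops over every minimizable value in the
+// entry, handing shared memory back to the worker before each call and
+// deducting the time and executions already spent from args.Timeout and
+// args.Limit between iterations.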
+func (wc *workerClient) minimize(ctx context.Context, entryIn CorpusEntry, args minimizeArgs) (entryOut CorpusEntry, resp minimizeResponse, retErr error) { + wc.mu.Lock() + defer wc.mu.Unlock() + + mem, ok := <-wc.memMu + if !ok { + return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed + } + defer func() { wc.memMu <- mem }() + mem.header().count = 0 + inp, err := corpusEntryData(entryIn) + if err != nil { + return CorpusEntry{}, minimizeResponse{}, err + } + mem.setValue(inp) + entryOut = entryIn + entryOut.Values, err = unmarshalCorpusFile(inp) + if err != nil { + return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling provided value: %v", err) + } + for i, v := range entryOut.Values { + if !isMinimizable(reflect.TypeOf(v)) { + continue + } + + wc.memMu <- mem + args.Index = i + c := call{Minimize: &args} + callErr := wc.callLocked(ctx, c, &resp) + mem, ok = <-wc.memMu + if !ok { + return CorpusEntry{}, minimizeResponse{}, errSharedMemClosed + } + + if callErr != nil { + retErr = callErr + if !mem.header().rawInMem { + // An unrecoverable error occurred before minimization began. + return entryIn, minimizeResponse{}, retErr + } + // An unrecoverable error occurred during minimization. mem now + // holds the raw, unmarshaled bytes of entryIn.Values[i] that + // caused the error. + switch entryOut.Values[i].(type) { + case string: + entryOut.Values[i] = string(mem.valueCopy()) + case []byte: + entryOut.Values[i] = mem.valueCopy() + default: + panic("impossible") + } + entryOut.Data = marshalCorpusFile(entryOut.Values...) + // Stop minimizing; another unrecoverable error is likely to occur. + break + } + + if resp.WroteToMem { + // Minimization succeeded, and mem holds the marshaled data. + entryOut.Data = mem.valueCopy() + entryOut.Values, err = unmarshalCorpusFile(entryOut.Data) + if err != nil { + return CorpusEntry{}, minimizeResponse{}, fmt.Errorf("workerClient.minimize unmarshaling minimized value: %v", err) + } + } + + // Prepare for next iteration of the loop. + if args.Timeout != 0 { + args.Timeout -= resp.Duration + if args.Timeout <= 0 { + break + } + } + if args.Limit != 0 { + args.Limit -= mem.header().count + if args.Limit <= 0 { + break + } + } + } + resp.Count = mem.header().count + h := sha256.Sum256(entryOut.Data) + entryOut.Path = fmt.Sprintf("%x", h[:4]) + return entryOut, resp, retErr +} + +// fuzz tells the worker to call the fuzz method. See workerServer.fuzz. 
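+//
+// The worker does not send a mutated input back directly; it reports only the
+// saved PRNG state and execution count through shared memory, and this method
+// reconstructs the input by replaying the same deterministic mutations (see
+// chainedMutations).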
+func (wc *workerClient) fuzz(ctx context.Context, entryIn CorpusEntry, args fuzzArgs) (entryOut CorpusEntry, resp fuzzResponse, isInternalError bool, err error) {
+	wc.mu.Lock()
+	defer wc.mu.Unlock()
+
+	mem, ok := <-wc.memMu
+	if !ok {
+		return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
+	}
+	mem.header().count = 0
+	inp, err := corpusEntryData(entryIn)
+	if err != nil {
+		wc.memMu <- mem
+		return CorpusEntry{}, fuzzResponse{}, true, err
+	}
+	mem.setValue(inp)
+	wc.memMu <- mem
+
+	c := call{Fuzz: &args}
+	callErr := wc.callLocked(ctx, c, &resp)
+	if resp.InternalErr != "" {
+		return CorpusEntry{}, fuzzResponse{}, true, errors.New(resp.InternalErr)
+	}
+	mem, ok = <-wc.memMu
+	if !ok {
+		return CorpusEntry{}, fuzzResponse{}, true, errSharedMemClosed
+	}
+	defer func() { wc.memMu <- mem }()
+	resp.Count = mem.header().count
+
+	if !bytes.Equal(inp, mem.valueRef()) {
+		return CorpusEntry{}, fuzzResponse{}, true, errors.New("workerServer.fuzz modified input")
+	}
+	needEntryOut := callErr != nil || resp.Err != "" ||
+		(!args.Warmup && resp.CoverageData != nil)
+	if needEntryOut {
+		valuesOut, err := unmarshalCorpusFile(inp)
+		if err != nil {
+			return CorpusEntry{}, fuzzResponse{}, true, fmt.Errorf("unmarshaling fuzz input value after call: %v", err)
+		}
+		wc.m.r.restore(mem.header().randState, mem.header().randInc)
+		if !args.Warmup {
+			// Only mutate the valuesOut if fuzzing actually occurred.
+			numMutations := ((resp.Count - 1) % chainedMutations) + 1
+			for i := int64(0); i < numMutations; i++ {
+				wc.m.mutate(valuesOut, cap(mem.valueRef()))
+			}
+		}
+		dataOut := marshalCorpusFile(valuesOut...)
+
+		h := sha256.Sum256(dataOut)
+		name := fmt.Sprintf("%x", h[:4])
+		entryOut = CorpusEntry{
+			Parent:     entryIn.Path,
+			Path:       name,
+			Data:       dataOut,
+			Generation: entryIn.Generation + 1,
+		}
+		if args.Warmup {
+			// The bytes weren't mutated, so if entryIn was a seed corpus value,
+			// then entryOut is too.
+			entryOut.IsSeed = entryIn.IsSeed
+		}
+	}
+
+	return entryOut, resp, false, callErr
+}
+
+// ping tells the worker to call the ping method. See workerServer.ping.
+func (wc *workerClient) ping(ctx context.Context) error {
+	wc.mu.Lock()
+	defer wc.mu.Unlock()
+	c := call{Ping: &pingArgs{}}
+	var resp pingResponse
+	return wc.callLocked(ctx, c, &resp)
+}
+
+// callLocked sends an RPC from the coordinator to the worker process and waits
+// for the response. The call may be canceled with ctx.
+func (wc *workerClient) callLocked(ctx context.Context, c call, resp any) (err error) {
+	enc := json.NewEncoder(wc.fuzzIn)
+	dec := json.NewDecoder(&contextReader{ctx: ctx, r: wc.fuzzOut})
+	if err := enc.Encode(c); err != nil {
+		return err
+	}
+	return dec.Decode(resp)
+}
+
+// contextReader wraps a Reader with a Context. If the context is canceled
+// while the underlying reader is blocked, Read returns immediately.
+//
+// This is useful for reading from a pipe. Closing a pipe file descriptor does
+// not unblock pending Reads on that file descriptor. All copies of the pipe's
+// other file descriptor (the write end) must be closed in all processes that
+// inherit it. This is difficult to do correctly in the situation we care about
+// (process group termination).
+type contextReader struct {
+	ctx context.Context
+	r   io.Reader
+}
+
+func (cr *contextReader) Read(b []byte) (int, error) {
+	if ctxErr := cr.ctx.Err(); ctxErr != nil {
+		return 0, ctxErr
+	}
+	done := make(chan struct{})
+
+	// This goroutine may stay blocked after Read returns because the underlying
+	// read is blocked.
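+	// If the context is canceled first, Read returns early while the
+	// goroutine may still write into b later, so the buffer of a canceled
+	// Read must not be reused.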
+ var n int + var err error + go func() { + n, err = cr.r.Read(b) + close(done) + }() + + select { + case <-cr.ctx.Done(): + return 0, cr.ctx.Err() + case <-done: + return n, err + } +} diff --git a/testing/internal/fuzz/worker_test.go b/testing/internal/fuzz/worker_test.go new file mode 100644 index 0000000..05cde2a --- /dev/null +++ b/testing/internal/fuzz/worker_test.go @@ -0,0 +1,207 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fuzz + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "os" + "os/signal" + "reflect" + "strconv" + "testing" + "time" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/race" +) + +var benchmarkWorkerFlag = flag.Bool("benchmarkworker", false, "") + +func TestMain(m *testing.M) { + flag.Parse() + if *benchmarkWorkerFlag { + runBenchmarkWorker() + return + } + os.Exit(m.Run()) +} + +func BenchmarkWorkerFuzzOverhead(b *testing.B) { + if race.Enabled { + b.Skip("TODO(48504): fix and re-enable") + } + origEnv := os.Getenv("GODEBUG") + defer func() { os.Setenv("GODEBUG", origEnv) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,fuzzseed=123", origEnv)) + + ws := &workerServer{ + fuzzFn: func(_ CorpusEntry) (time.Duration, error) { return time.Second, nil }, + workerComm: workerComm{memMu: make(chan *sharedMem, 1)}, + } + + mem, err := sharedMemTempFile(workerSharedMemSize) + if err != nil { + b.Fatalf("failed to create temporary shared memory file: %s", err) + } + defer func() { + if err := mem.Close(); err != nil { + b.Error(err) + } + }() + + initialVal := []any{make([]byte, 32)} + encodedVals := marshalCorpusFile(initialVal...) + mem.setValue(encodedVals) + + ws.memMu <- mem + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ws.m = newMutator() + mem.setValue(encodedVals) + mem.header().count = 0 + + ws.fuzz(context.Background(), fuzzArgs{Limit: 1}) + } +} + +// BenchmarkWorkerPing acts as the coordinator and measures the time it takes +// a worker to respond to N pings. This is a rough measure of our RPC latency. +func BenchmarkWorkerPing(b *testing.B) { + if race.Enabled { + b.Skip("TODO(48504): fix and re-enable") + } + b.SetParallelism(1) + w := newWorkerForTest(b) + for i := 0; i < b.N; i++ { + if err := w.client.ping(context.Background()); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkWorkerFuzz acts as the coordinator and measures the time it takes +// a worker to mutate a given input and call a trivial fuzz function N times. +func BenchmarkWorkerFuzz(b *testing.B) { + if race.Enabled { + b.Skip("TODO(48504): fix and re-enable") + } + b.SetParallelism(1) + w := newWorkerForTest(b) + entry := CorpusEntry{Values: []any{[]byte(nil)}} + entry.Data = marshalCorpusFile(entry.Values...) + for i := int64(0); i < int64(b.N); { + args := fuzzArgs{ + Limit: int64(b.N) - i, + Timeout: workerFuzzDuration, + } + _, resp, _, err := w.client.fuzz(context.Background(), entry, args) + if err != nil { + b.Fatal(err) + } + if resp.Err != "" { + b.Fatal(resp.Err) + } + if resp.Count == 0 { + b.Fatal("worker did not make progress") + } + i += resp.Count + } +} + +// newWorkerForTest creates and starts a worker process for testing or +// benchmarking. The worker process calls RunFuzzWorker, which responds to +// RPC messages until it's stopped. The process is stopped and cleaned up +// automatically when the test is done. 
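+// The worker process is this same test binary, re-invoked with the
+// -benchmarkworker flag so that TestMain calls RunFuzzWorker instead of
+// running the tests (see TestMain and runBenchmarkWorker).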
+func newWorkerForTest(tb testing.TB) *worker { + tb.Helper() + c, err := newCoordinator(CoordinateFuzzingOpts{ + Types: []reflect.Type{reflect.TypeOf([]byte(nil))}, + Log: io.Discard, + }) + if err != nil { + tb.Fatal(err) + } + dir := "" // same as self + binPath := os.Args[0] // same as self + args := append(os.Args[1:], "-benchmarkworker") + env := os.Environ() // same as self + w, err := newWorker(c, dir, binPath, args, env) + if err != nil { + tb.Fatal(err) + } + tb.Cleanup(func() { + if err := w.cleanup(); err != nil { + tb.Error(err) + } + }) + if err := w.startAndPing(context.Background()); err != nil { + tb.Fatal(err) + } + tb.Cleanup(func() { + if err := w.stop(); err != nil { + tb.Error(err) + } + }) + return w +} + +func runBenchmarkWorker() { + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + fn := func(CorpusEntry) error { return nil } + if err := RunFuzzWorker(ctx, fn); err != nil && err != ctx.Err() { + panic(err) + } +} + +func BenchmarkWorkerMinimize(b *testing.B) { + if race.Enabled { + b.Skip("TODO(48504): fix and re-enable") + } + + ws := &workerServer{ + workerComm: workerComm{memMu: make(chan *sharedMem, 1)}, + } + + mem, err := sharedMemTempFile(workerSharedMemSize) + if err != nil { + b.Fatalf("failed to create temporary shared memory file: %s", err) + } + defer func() { + if err := mem.Close(); err != nil { + b.Error(err) + } + }() + ws.memMu <- mem + + bytes := make([]byte, 1024) + ctx := context.Background() + for sz := 1; sz <= len(bytes); sz <<= 1 { + sz := sz + input := []any{bytes[:sz]} + encodedVals := marshalCorpusFile(input...) + mem = <-ws.memMu + mem.setValue(encodedVals) + ws.memMu <- mem + b.Run(strconv.Itoa(sz), func(b *testing.B) { + i := 0 + ws.fuzzFn = func(_ CorpusEntry) (time.Duration, error) { + if i == 0 { + i++ + return time.Second, errors.New("initial failure for deflake") + } + return time.Second, nil + } + for i := 0; i < b.N; i++ { + b.SetBytes(int64(sz)) + ws.minimize(ctx, minimizeArgs{}) + } + }) + } +} diff --git a/testing/internal/goarch/gengoarch.go b/testing/internal/goarch/gengoarch.go new file mode 100644 index 0000000..a52936e --- /dev/null +++ b/testing/internal/goarch/gengoarch.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" +) + +var goarches []string + +func main() { + data, err := os.ReadFile("../../internal/syslist/syslist.go") + if err != nil { + log.Fatal(err) + } + const goarchPrefix = `var KnownArch = map[string]bool{` + inGOARCH := false + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, goarchPrefix) { + inGOARCH = true + } else if inGOARCH && strings.HasPrefix(line, "}") { + break + } else if inGOARCH { + goarch := strings.Fields(line)[0] + goarch = strings.TrimPrefix(goarch, `"`) + goarch = strings.TrimSuffix(goarch, `":`) + goarches = append(goarches, goarch) + } + } + + for _, target := range goarches { + if target == "amd64p32" { + continue + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. 
DO NOT EDIT.\n\n") + fmt.Fprintf(&buf, "//go:build %s\n\n", target) // must explicitly include target for bootstrapping purposes + fmt.Fprintf(&buf, "package goarch\n\n") + fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target) + for _, goarch := range goarches { + value := 0 + if goarch == target { + value = 1 + } + fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value) + } + err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666) + if err != nil { + log.Fatal(err) + } + } +} diff --git a/testing/internal/goarch/goarch.go b/testing/internal/goarch/goarch.go new file mode 100644 index 0000000..3dda62f --- /dev/null +++ b/testing/internal/goarch/goarch.go @@ -0,0 +1,60 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package goarch contains GOARCH-specific constants. +package goarch + +// The next line makes 'go generate' write the zgoarch*.go files with +// per-arch information, including constants named $GOARCH for every +// GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying +// by them is useful for defining GOARCH-specific constants. +// +//go:generate go run gengoarch.go + +type ArchFamilyType int + +const ( + AMD64 ArchFamilyType = iota + ARM + ARM64 + I386 + LOONG64 + MIPS + MIPS64 + PPC64 + RISCV64 + S390X + WASM +) + +// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant. +// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit). +const PtrSize = 4 << (^uintptr(0) >> 63) + +// ArchFamily is the architecture family (AMD64, ARM, ...) +const ArchFamily ArchFamilyType = _ArchFamily + +// BigEndian reports whether the architecture is big-endian. +const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1 + +// DefaultPhysPageSize is the default physical page size. +const DefaultPhysPageSize = _DefaultPhysPageSize + +// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems). +// The various PC tables record PC deltas pre-divided by PCQuantum. +const PCQuantum = _PCQuantum + +// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit). +const Int64Align = PtrSize + +// MinFrameSize is the size of the system-reserved words at the bottom +// of a frame (just above the architectural stack pointer). +// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems. +// On PowerPC it is larger, to cover three more reserved words: +// the compiler word, the link editor word, and the TOC save word. +const MinFrameSize = _MinFrameSize + +// StackAlign is the required alignment of the SP register. +// The stack must be at least word aligned, but some architectures require more. +const StackAlign = _StackAlign diff --git a/testing/internal/goarch/goarch_386.go b/testing/internal/goarch/goarch_386.go new file mode 100644 index 0000000..c621421 --- /dev/null +++ b/testing/internal/goarch/goarch_386.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package goarch + +const ( + _ArchFamily = I386 + _DefaultPhysPageSize = 4096 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_amd64.go b/testing/internal/goarch/goarch_amd64.go new file mode 100644 index 0000000..911e3e7 --- /dev/null +++ b/testing/internal/goarch/goarch_amd64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = AMD64 + _DefaultPhysPageSize = 4096 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_arm.go b/testing/internal/goarch/goarch_arm.go new file mode 100644 index 0000000..a659171 --- /dev/null +++ b/testing/internal/goarch/goarch_arm.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = ARM + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_arm64.go b/testing/internal/goarch/goarch_arm64.go new file mode 100644 index 0000000..85d0b47 --- /dev/null +++ b/testing/internal/goarch/goarch_arm64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = ARM64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = 16 +) diff --git a/testing/internal/goarch/goarch_loong64.go b/testing/internal/goarch/goarch_loong64.go new file mode 100644 index 0000000..dae1f4d --- /dev/null +++ b/testing/internal/goarch/goarch_loong64.go @@ -0,0 +1,15 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package goarch + +const ( + _ArchFamily = LOONG64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_mips.go b/testing/internal/goarch/goarch_mips.go new file mode 100644 index 0000000..59f3995 --- /dev/null +++ b/testing/internal/goarch/goarch_mips.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_mips64.go b/testing/internal/goarch/goarch_mips64.go new file mode 100644 index 0000000..9e4f827 --- /dev/null +++ b/testing/internal/goarch/goarch_mips64.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package goarch + +const ( + _ArchFamily = MIPS64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_mips64le.go b/testing/internal/goarch/goarch_mips64le.go new file mode 100644 index 0000000..9e4f827 --- /dev/null +++ b/testing/internal/goarch/goarch_mips64le.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS64 + _DefaultPhysPageSize = 16384 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_mipsle.go b/testing/internal/goarch/goarch_mipsle.go new file mode 100644 index 0000000..3e6642b --- /dev/null +++ b/testing/internal/goarch/goarch_mipsle.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = MIPS + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 4 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_ppc64.go b/testing/internal/goarch/goarch_ppc64.go new file mode 100644 index 0000000..60cc846 --- /dev/null +++ b/testing/internal/goarch/goarch_ppc64.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = PPC64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 32 + _StackAlign = 16 +) diff --git a/testing/internal/goarch/goarch_ppc64le.go b/testing/internal/goarch/goarch_ppc64le.go new file mode 100644 index 0000000..60cc846 --- /dev/null +++ b/testing/internal/goarch/goarch_ppc64le.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = PPC64 + _DefaultPhysPageSize = 65536 + _PCQuantum = 4 + _MinFrameSize = 32 + _StackAlign = 16 +) diff --git a/testing/internal/goarch/goarch_riscv64.go b/testing/internal/goarch/goarch_riscv64.go new file mode 100644 index 0000000..3b6da1e --- /dev/null +++ b/testing/internal/goarch/goarch_riscv64.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = RISCV64 + _DefaultPhysPageSize = 4096 + _PCQuantum = 4 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_s390x.go b/testing/internal/goarch/goarch_s390x.go new file mode 100644 index 0000000..20c5705 --- /dev/null +++ b/testing/internal/goarch/goarch_s390x.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = S390X + _DefaultPhysPageSize = 4096 + _PCQuantum = 2 + _MinFrameSize = 8 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/goarch_wasm.go b/testing/internal/goarch/goarch_wasm.go new file mode 100644 index 0000000..98618d6 --- /dev/null +++ b/testing/internal/goarch/goarch_wasm.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goarch + +const ( + _ArchFamily = WASM + _DefaultPhysPageSize = 65536 + _PCQuantum = 1 + _MinFrameSize = 0 + _StackAlign = PtrSize +) diff --git a/testing/internal/goarch/zgoarch_386.go b/testing/internal/goarch/zgoarch_386.go new file mode 100644 index 0000000..4a9b0e6 --- /dev/null +++ b/testing/internal/goarch/zgoarch_386.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build 386 + +package goarch + +const GOARCH = `386` + +const Is386 = 1 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_amd64.go b/testing/internal/goarch/zgoarch_amd64.go new file mode 100644 index 0000000..7926392 --- /dev/null +++ b/testing/internal/goarch/zgoarch_amd64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build amd64 + +package goarch + +const GOARCH = `amd64` + +const Is386 = 0 +const IsAmd64 = 1 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_arm.go b/testing/internal/goarch/zgoarch_arm.go new file mode 100644 index 0000000..6c03b8b --- /dev/null +++ b/testing/internal/goarch/zgoarch_arm.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build arm + +package goarch + +const GOARCH = `arm` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 1 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_arm64.go b/testing/internal/goarch/zgoarch_arm64.go new file mode 100644 index 0000000..ad342d7 --- /dev/null +++ b/testing/internal/goarch/zgoarch_arm64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build arm64 + +package goarch + +const GOARCH = `arm64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 1 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_arm64be.go b/testing/internal/goarch/zgoarch_arm64be.go new file mode 100644 index 0000000..0f26003 --- /dev/null +++ b/testing/internal/goarch/zgoarch_arm64be.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build arm64be + +package goarch + +const GOARCH = `arm64be` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 1 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_armbe.go b/testing/internal/goarch/zgoarch_armbe.go new file mode 100644 index 0000000..6092fee --- /dev/null +++ b/testing/internal/goarch/zgoarch_armbe.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build armbe + +package goarch + +const GOARCH = `armbe` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 1 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_loong64.go b/testing/internal/goarch/zgoarch_loong64.go new file mode 100644 index 0000000..21c67e1 --- /dev/null +++ b/testing/internal/goarch/zgoarch_loong64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build loong64 + +package goarch + +const GOARCH = `loong64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 1 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mips.go b/testing/internal/goarch/zgoarch_mips.go new file mode 100644 index 0000000..0db1974 --- /dev/null +++ b/testing/internal/goarch/zgoarch_mips.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build mips + +package goarch + +const GOARCH = `mips` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 1 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mips64.go b/testing/internal/goarch/zgoarch_mips64.go new file mode 100644 index 0000000..738806f --- /dev/null +++ b/testing/internal/goarch/zgoarch_mips64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64 + +package goarch + +const GOARCH = `mips64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 1 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mips64le.go b/testing/internal/goarch/zgoarch_mips64le.go new file mode 100644 index 0000000..8de5beb --- /dev/null +++ b/testing/internal/goarch/zgoarch_mips64le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64le + +package goarch + +const GOARCH = `mips64le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 1 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mips64p32.go b/testing/internal/goarch/zgoarch_mips64p32.go new file mode 100644 index 0000000..ea461be --- /dev/null +++ b/testing/internal/goarch/zgoarch_mips64p32.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mips64p32 + +package goarch + +const GOARCH = `mips64p32` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 1 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mips64p32le.go b/testing/internal/goarch/zgoarch_mips64p32le.go new file mode 100644 index 0000000..15473ce --- /dev/null +++ b/testing/internal/goarch/zgoarch_mips64p32le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build mips64p32le + +package goarch + +const GOARCH = `mips64p32le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 1 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_mipsle.go b/testing/internal/goarch/zgoarch_mipsle.go new file mode 100644 index 0000000..4955142 --- /dev/null +++ b/testing/internal/goarch/zgoarch_mipsle.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build mipsle + +package goarch + +const GOARCH = `mipsle` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 1 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_ppc.go b/testing/internal/goarch/zgoarch_ppc.go new file mode 100644 index 0000000..ec01763 --- /dev/null +++ b/testing/internal/goarch/zgoarch_ppc.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build ppc + +package goarch + +const GOARCH = `ppc` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 1 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_ppc64.go b/testing/internal/goarch/zgoarch_ppc64.go new file mode 100644 index 0000000..39be392 --- /dev/null +++ b/testing/internal/goarch/zgoarch_ppc64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build ppc64 + +package goarch + +const GOARCH = `ppc64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 1 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_ppc64le.go b/testing/internal/goarch/zgoarch_ppc64le.go new file mode 100644 index 0000000..5f959e0 --- /dev/null +++ b/testing/internal/goarch/zgoarch_ppc64le.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build ppc64le + +package goarch + +const GOARCH = `ppc64le` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 1 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_riscv.go b/testing/internal/goarch/zgoarch_riscv.go new file mode 100644 index 0000000..8d81a14 --- /dev/null +++ b/testing/internal/goarch/zgoarch_riscv.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build riscv + +package goarch + +const GOARCH = `riscv` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 1 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_riscv64.go b/testing/internal/goarch/zgoarch_riscv64.go new file mode 100644 index 0000000..1df989c --- /dev/null +++ b/testing/internal/goarch/zgoarch_riscv64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build riscv64 + +package goarch + +const GOARCH = `riscv64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 1 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_s390.go b/testing/internal/goarch/zgoarch_s390.go new file mode 100644 index 0000000..56815b9 --- /dev/null +++ b/testing/internal/goarch/zgoarch_s390.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build s390 + +package goarch + +const GOARCH = `s390` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 1 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_s390x.go b/testing/internal/goarch/zgoarch_s390x.go new file mode 100644 index 0000000..e61e9bd --- /dev/null +++ b/testing/internal/goarch/zgoarch_s390x.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. 
+ +//go:build s390x + +package goarch + +const GOARCH = `s390x` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 1 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_sparc.go b/testing/internal/goarch/zgoarch_sparc.go new file mode 100644 index 0000000..ee5b746 --- /dev/null +++ b/testing/internal/goarch/zgoarch_sparc.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build sparc + +package goarch + +const GOARCH = `sparc` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 1 +const IsSparc64 = 0 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_sparc64.go b/testing/internal/goarch/zgoarch_sparc64.go new file mode 100644 index 0000000..519aaa1 --- /dev/null +++ b/testing/internal/goarch/zgoarch_sparc64.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build sparc64 + +package goarch + +const GOARCH = `sparc64` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 1 +const IsWasm = 0 diff --git a/testing/internal/goarch/zgoarch_wasm.go b/testing/internal/goarch/zgoarch_wasm.go new file mode 100644 index 0000000..25567a1 --- /dev/null +++ b/testing/internal/goarch/zgoarch_wasm.go @@ -0,0 +1,32 @@ +// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT. + +//go:build wasm + +package goarch + +const GOARCH = `wasm` + +const Is386 = 0 +const IsAmd64 = 0 +const IsAmd64p32 = 0 +const IsArm = 0 +const IsArmbe = 0 +const IsArm64 = 0 +const IsArm64be = 0 +const IsLoong64 = 0 +const IsMips = 0 +const IsMipsle = 0 +const IsMips64 = 0 +const IsMips64le = 0 +const IsMips64p32 = 0 +const IsMips64p32le = 0 +const IsPpc = 0 +const IsPpc64 = 0 +const IsPpc64le = 0 +const IsRiscv = 0 +const IsRiscv64 = 0 +const IsS390 = 0 +const IsS390x = 0 +const IsSparc = 0 +const IsSparc64 = 0 +const IsWasm = 1 diff --git a/testing/internal/godebug/godebug.go b/testing/internal/godebug/godebug.go new file mode 100644 index 0000000..dcf7a2c --- /dev/null +++ b/testing/internal/godebug/godebug.go @@ -0,0 +1,311 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package godebug makes the settings in the $GODEBUG environment variable +// available to other packages. 
These settings are often used for compatibility +// tweaks, when we need to change a default behavior but want to let users +// opt back in to the original. For example GODEBUG=http2server=0 disables +// HTTP/2 support in the net/http server. +// +// In typical usage, code should declare a Setting as a global +// and then call Value each time the current setting value is needed: +// +// var http2server = godebug.New("http2server") +// +// func ServeConn(c net.Conn) { +// if http2server.Value() == "0" { +// disallow HTTP/2 +// ... +// } +// ... +// } +// +// Each time a non-default setting causes a change in program behavior, +// code must call [Setting.IncNonDefault] to increment a counter that can +// be reported by [runtime/metrics.Read]. The call must only happen when +// the program executes a non-default behavior, not just when the setting +// is set to a non-default value. This is occasionally (but very rarely) +// infeasible, in which case the internal/godebugs table entry must set +// Opaque: true, and the documentation in doc/godebug.md should +// mention that metrics are unavailable. +// +// Conventionally, the global variable representing a godebug is named +// for the godebug itself, with no case changes: +// +// var gotypesalias = godebug.New("gotypesalias") // this +// var goTypesAlias = godebug.New("gotypesalias") // NOT THIS +// +// The test in internal/godebugs that checks for use of IncNonDefault +// requires the use of this convention. +// +// Note that counters used with IncNonDefault must be added to +// various tables in other packages. See the [Setting.IncNonDefault] +// documentation for details. +package godebug + +// Note: Be careful about new imports here. Any package +// that internal/godebug imports cannot itself import internal/godebug, +// meaning it cannot introduce a GODEBUG setting of its own. +// We keep imports to the absolute bare minimum. +import ( + "sync" + "sync/atomic" + "unsafe" + _ "unsafe" // go:linkname + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/bisect" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebugs" +) + +// A Setting is a single setting in the $GODEBUG environment variable. +type Setting struct { + name string + once sync.Once + *setting +} + +type setting struct { + value atomic.Pointer[value] + nonDefaultOnce sync.Once + nonDefault atomic.Uint64 + info *godebugs.Info +} + +type value struct { + text string + bisect *bisect.Matcher +} + +// New returns a new Setting for the $GODEBUG setting with the given name. +// +// GODEBUGs meant for use by end users must be listed in ../godebugs/table.go, +// which is used for generating and checking various documentation. +// If the name is not listed in that table, New will succeed but calling Value +// on the returned Setting will panic. +// To disable that panic for access to an undocumented setting, +// prefix the name with a #, as in godebug.New("#gofsystrace"). +// The # is a signal to New but not part of the key used in $GODEBUG. +// +// Note that almost all settings should arrange to call [IncNonDefault] precisely +// when program behavior is changing from the default due to the setting +// (not just when the setting is different, but when program behavior changes). +// See the [internal/godebug] package comment for more. +func New(name string) *Setting { + return &Setting{name: name} +} + +// Name returns the name of the setting. 
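+// For a setting created as New("#gofsystrace"), Name reports "gofsystrace":
+// the leading '#' is a signal to New only and is never part of the key
+// looked up in $GODEBUG.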
+func (s *Setting) Name() string {
+	if s.name != "" && s.name[0] == '#' {
+		return s.name[1:]
+	}
+	return s.name
+}
+
+// Undocumented reports whether this is an undocumented setting.
+func (s *Setting) Undocumented() bool {
+	return s.name != "" && s.name[0] == '#'
+}
+
+// String returns a printable form for the setting: name=value.
+func (s *Setting) String() string {
+	return s.Name() + "=" + s.Value()
+}
+
+// IncNonDefault increments the non-default behavior counter
+// associated with the given setting.
+// This counter is exposed in the runtime/metrics value
+// /godebug/non-default-behavior/<name>:events.
+//
+// Note that Value must be called at least once before IncNonDefault.
+func (s *Setting) IncNonDefault() {
+	s.nonDefaultOnce.Do(s.register)
+	s.nonDefault.Add(1)
+}
+
+func (s *Setting) register() {
+	if s.info == nil || s.info.Opaque {
+		panic("godebug: unexpected IncNonDefault of " + s.name)
+	}
+	registerMetric("/godebug/non-default-behavior/"+s.Name()+":events", s.nonDefault.Load)
+}
+
+// cache is a cache of all the GODEBUG settings,
+// a locked map[string]*atomic.Pointer[string].
+//
+// All Settings with the same name share a single
+// *atomic.Pointer[string], so that when GODEBUG
+// changes only that single atomic string pointer
+// needs to be updated.
+//
+// A name appears in the values map either if it is the
+// name of a Setting for which Value has been called
+// at least once, or if the name has ever appeared in
+// a name=value pair in the $GODEBUG environment variable.
+// Once entered into the map, the name is never removed.
+var cache sync.Map // name string -> value *atomic.Pointer[string]
+
+var empty value
+
+// Value returns the current value for the GODEBUG setting s.
+//
+// Value maintains an internal cache that is synchronized
+// with changes to the $GODEBUG environment variable,
+// making Value efficient to call as frequently as needed.
+// Clients should therefore typically not attempt their own
+// caching of Value's result.
+func (s *Setting) Value() string {
+	s.once.Do(func() {
+		s.setting = lookup(s.Name())
+		if s.info == nil && !s.Undocumented() {
+			panic("godebug: Value of name not listed in godebugs.All: " + s.name)
+		}
+	})
+	v := *s.value.Load()
+	if v.bisect != nil && !v.bisect.Stack(&stderr) {
+		return ""
+	}
+	return v.text
+}
+
+// lookup returns the unique *setting value for the given name.
+func lookup(name string) *setting {
+	if v, ok := cache.Load(name); ok {
+		return v.(*setting)
+	}
+	s := new(setting)
+	s.info = godebugs.Lookup(name)
+	s.value.Store(&empty)
+	if v, loaded := cache.LoadOrStore(name, s); loaded {
+		// Lost race: someone else created it. Use theirs.
+		return v.(*setting)
+	}
+
+	return s
+}
+
+// setUpdate is provided by package runtime.
+// It calls update(def, env), where def is the default GODEBUG setting
+// and env is the current value of the $GODEBUG environment variable.
+// After that first call, the runtime calls update(def, env)
+// again each time the environment variable changes
+// (due to use of os.Setenv, for example).
+//
+//go:linkname setUpdate
+func setUpdate(update func(string, string))
+
+// registerMetric is provided by package runtime.
+// It forwards registrations to runtime/metrics.
+//
+//go:linkname registerMetric
+func registerMetric(name string, read func() uint64)
+
+// setNewIncNonDefault is provided by package runtime.
+// The runtime can do +// +// inc := newNonDefaultInc(name) +// +// instead of +// +// inc := godebug.New(name).IncNonDefault +// +// since it cannot import godebug. +// +//go:linkname setNewIncNonDefault +func setNewIncNonDefault(newIncNonDefault func(string) func()) + +func init() { + setUpdate(update) + setNewIncNonDefault(newIncNonDefault) +} + +func newIncNonDefault(name string) func() { + s := New(name) + s.Value() + return s.IncNonDefault +} + +var updateMu sync.Mutex + +// update records an updated GODEBUG setting. +// def is the default GODEBUG setting for the running binary, +// and env is the current value of the $GODEBUG environment variable. +func update(def, env string) { + updateMu.Lock() + defer updateMu.Unlock() + + // Update all the cached values, creating new ones as needed. + // We parse the environment variable first, so that any settings it has + // are already locked in place (did[name] = true) before we consider + // the defaults. + did := make(map[string]bool) + parse(did, env) + parse(did, def) + + // Clear any cached values that are no longer present. + cache.Range(func(name, s any) bool { + if !did[name.(string)] { + s.(*setting).value.Store(&empty) + } + return true + }) +} + +// parse parses the GODEBUG setting string s, +// which has the form k=v,k2=v2,k3=v3. +// Later settings override earlier ones. +// Parse only updates settings k=v for which did[k] = false. +// It also sets did[k] = true for settings that it updates. +// Each value v can also have the form v#pattern, +// in which case the GODEBUG is only enabled for call stacks +// matching pattern, for use with golang.org/x/tools/cmd/bisect. +func parse(did map[string]bool, s string) { + // Scan the string backward so that later settings are used + // and earlier settings are ignored. + // Note that a forward scan would cause cached values + // to temporarily use the ignored value before being + // updated to the "correct" one. + end := len(s) + eq := -1 + for i := end - 1; i >= -1; i-- { + if i == -1 || s[i] == ',' { + if eq >= 0 { + name, arg := s[i+1:eq], s[eq+1:end] + if !did[name] { + did[name] = true + v := &value{text: arg} + for j := 0; j < len(arg); j++ { + if arg[j] == '#' { + v.text = arg[:j] + v.bisect, _ = bisect.New(arg[j+1:]) + break + } + } + lookup(name).value.Store(v) + } + } + eq = -1 + end = i + } else if s[i] == '=' { + eq = i + } + } +} + +type runtimeStderr struct{} + +var stderr runtimeStderr + +func (*runtimeStderr) Write(b []byte) (int, error) { + if len(b) > 0 { + write(2, unsafe.Pointer(&b[0]), int32(len(b))) + } + return len(b), nil +} + +// Since we cannot import os or syscall, use the runtime's write function +// to print to standard error. +// +//go:linkname write runtime.write +func write(fd uintptr, p unsafe.Pointer, n int32) int32 diff --git a/testing/internal/godebug/godebug_test.go b/testing/internal/godebug/godebug_test.go new file mode 100644 index 0000000..b56dbf1 --- /dev/null +++ b/testing/internal/godebug/godebug_test.go @@ -0,0 +1,165 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package godebug_test + +import ( + "fmt" + "os" + "os/exec" + "runtime/metrics" + "slices" + "strings" + "testing" + + . 
"github.com/CodSpeedHQ/codspeed-go/testing/internal/godebug" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/race" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +func TestGet(t *testing.T) { + foo := New("#foo") + tests := []struct { + godebug string + setting *Setting + want string + }{ + {"", New("#"), ""}, + {"", foo, ""}, + {"foo=bar", foo, "bar"}, + {"foo=bar,after=x", foo, "bar"}, + {"before=x,foo=bar,after=x", foo, "bar"}, + {"before=x,foo=bar", foo, "bar"}, + {",,,foo=bar,,,", foo, "bar"}, + {"foodecoy=wrong,foo=bar", foo, "bar"}, + {"foo=", foo, ""}, + {"foo", foo, ""}, + {",foo", foo, ""}, + {"foo=bar,baz", New("#loooooooong"), ""}, + } + for _, tt := range tests { + t.Setenv("GODEBUG", tt.godebug) + got := tt.setting.Value() + if got != tt.want { + t.Errorf("get(%q, %q) = %q; want %q", tt.godebug, tt.setting.Name(), got, tt.want) + } + } +} + +func TestMetrics(t *testing.T) { + const name = "http2client" // must be a real name so runtime will accept it + + var m [1]metrics.Sample + m[0].Name = "/godebug/non-default-behavior/" + name + ":events" + metrics.Read(m[:]) + if kind := m[0].Value.Kind(); kind != metrics.KindUint64 { + t.Fatalf("NonDefault kind = %v, want uint64", kind) + } + + s := New(name) + s.Value() + s.IncNonDefault() + s.IncNonDefault() + s.IncNonDefault() + metrics.Read(m[:]) + if kind := m[0].Value.Kind(); kind != metrics.KindUint64 { + t.Fatalf("NonDefault kind = %v, want uint64", kind) + } + if count := m[0].Value.Uint64(); count != 3 { + t.Fatalf("NonDefault value = %d, want 3", count) + } +} + +// TestPanicNilRace checks for a race in the runtime caused by use of runtime +// atomics (not visible to usual race detection) to install the counter for +// non-default panic(nil) semantics. For #64649. 
+func TestPanicNilRace(t *testing.T) { + if !race.Enabled { + t.Skip("Skipping test intended for use with -race.") + } + if os.Getenv("GODEBUG") != "panicnil=1" { + cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestPanicNilRace$", "-test.v", "-test.parallel=2", "-test.count=1")) + cmd.Env = append(cmd.Env, "GODEBUG=panicnil=1") + out, err := cmd.CombinedOutput() + t.Logf("output:\n%s", out) + + if err != nil { + t.Errorf("Was not expecting a crash") + } + return + } + + test := func(t *testing.T) { + t.Parallel() + defer func() { + recover() + }() + panic(nil) + } + t.Run("One", test) + t.Run("Two", test) +} + +func TestCmdBisect(t *testing.T) { + testenv.MustHaveGoBuild(t) + out, err := exec.Command("go", "run", "cmd/vendor/golang.org/x/tools/cmd/bisect", "GODEBUG=buggy=1#PATTERN", os.Args[0], "-test.run=^TestBisectTestCase$").CombinedOutput() + if err != nil { + t.Fatalf("exec bisect: %v\n%s", err, out) + } + + var want []string + src, err := os.ReadFile("godebug_test.go") + if err != nil { + t.Fatal(err) + } + for i, line := range strings.Split(string(src), "\n") { + if strings.Contains(line, "BISECT"+" "+"BUG") { + want = append(want, fmt.Sprintf("godebug_test.go:%d", i+1)) + } + } + slices.Sort(want) + + var have []string + for _, line := range strings.Split(string(out), "\n") { + if strings.Contains(line, "godebug_test.go:") { + have = append(have, line[strings.LastIndex(line, "godebug_test.go:"):]) + } + } + slices.Sort(have) + + if !slices.Equal(have, want) { + t.Errorf("bad bisect output:\nhave %v\nwant %v\ncomplete output:\n%s", have, want, string(out)) + } +} + +// This test does nothing by itself, but you can run +// +// bisect 'GODEBUG=buggy=1#PATTERN' go test -run='^TestBisectTestCase$' +// +// to see that the GODEBUG bisect support is working. +// TestCmdBisect above does exactly that. +func TestBisectTestCase(t *testing.T) { + s := New("#buggy") + for i := 0; i < 10; i++ { + a := s.Value() == "1" + b := s.Value() == "1" + c := s.Value() == "1" // BISECT BUG + d := s.Value() == "1" // BISECT BUG + e := s.Value() == "1" // BISECT BUG + + if a { + t.Log("ok") + } + if b { + t.Log("ok") + } + if c { + t.Error("bug") + } + if d && + e { + t.Error("bug") + } + } +} diff --git a/testing/internal/godebugs/godebugs_test.go b/testing/internal/godebugs/godebugs_test.go new file mode 100644 index 0000000..e9e083a --- /dev/null +++ b/testing/internal/godebugs/godebugs_test.go @@ -0,0 +1,96 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
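+// The tests below verify invariants of the godebugs.All table: sorted order,
+// required fields, documentation in doc/godebug.md, and IncNonDefault usage.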
+ +package godebugs_test + +import ( + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebugs" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +func TestAll(t *testing.T) { + testenv.MustHaveGoBuild(t) + + data, err := os.ReadFile("../../../doc/godebug.md") + if err != nil { + if os.IsNotExist(err) && (testenv.Builder() == "" || runtime.GOOS != "linux") { + t.Skip(err) + } + t.Fatal(err) + } + doc := string(data) + + incs := incNonDefaults(t) + + last := "" + for _, info := range godebugs.All { + if info.Name <= last { + t.Errorf("All not sorted: %s then %s", last, info.Name) + } + last = info.Name + + if info.Package == "" { + t.Errorf("Name=%s missing Package", info.Name) + } + if info.Changed != 0 && info.Old == "" { + t.Errorf("Name=%s has Changed, missing Old", info.Name) + } + if info.Old != "" && info.Changed == 0 { + t.Errorf("Name=%s has Old, missing Changed", info.Name) + } + if !strings.Contains(doc, "`"+info.Name+"`") && + !strings.Contains(doc, "`"+info.Name+"=") { + t.Errorf("Name=%s not documented in doc/godebug.md", info.Name) + } + if !info.Opaque && !incs[info.Name] { + t.Errorf("Name=%s missing IncNonDefault calls; see 'go doc internal/godebug'", info.Name) + } + } +} + +var incNonDefaultRE = regexp.MustCompile(`([\pL\p{Nd}_]+)\.IncNonDefault\(\)`) + +func incNonDefaults(t *testing.T) map[string]bool { + // Build list of all files importing internal/godebug. + // Tried a more sophisticated search in go list looking for + // imports containing "github.com/CodSpeedHQ/codspeed-go/testing/internal/godebug", but that turned + // up a bug in go list instead. #66218 + out, err := exec.Command("go", "list", "-f={{.Dir}}", "std", "cmd").CombinedOutput() + if err != nil { + t.Fatalf("go list: %v\n%s", err, out) + } + + seen := map[string]bool{} + for _, dir := range strings.Split(string(out), "\n") { + if dir == "" { + continue + } + files, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + for _, file := range files { + name := file.Name() + if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + continue + } + data, err := os.ReadFile(filepath.Join(dir, name)) + if err != nil { + t.Fatal(err) + } + for _, m := range incNonDefaultRE.FindAllSubmatch(data, -1) { + seen[string(m[1])] = true + } + } + } + return seen +} diff --git a/testing/internal/godebugs/table.go b/testing/internal/godebugs/table.go new file mode 100644 index 0000000..9278a12 --- /dev/null +++ b/testing/internal/godebugs/table.go @@ -0,0 +1,91 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package godebugs provides a table of known GODEBUG settings, +// for use by a variety of other packages, including internal/godebug, +// runtime, runtime/metrics, and cmd/go/internal/load. +package godebugs + +// An Info describes a single known GODEBUG setting. +type Info struct { + Name string // name of the setting ("panicnil") + Package string // package that uses the setting ("runtime") + Changed int // minor version when default changed, if any; 21 means Go 1.21 + Old string // value that restores behavior prior to Changed + Opaque bool // setting does not export information to runtime/metrics using [internal/godebug.Setting.IncNonDefault] +} + +// All is the table of known settings, sorted by Name. 
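+// Reading an entry: {Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"}
+// records that the runtime's panicnil default changed in Go 1.21 and that
+// GODEBUG=panicnil=1 restores the pre-1.21 behavior.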
+// +// Note: After adding entries to this table, run 'go generate runtime/metrics' +// to update the runtime/metrics doc comment. +// (Otherwise the runtime/metrics test will fail.) +// +// Note: After adding entries to this table, update the list in doc/godebug.md as well. +// (Otherwise the test in this package will fail.) +var All = []Info{ + {Name: "allowmultiplevcs", Package: "cmd/go"}, + {Name: "asynctimerchan", Package: "time", Changed: 23, Old: "1"}, + {Name: "dataindependenttiming", Package: "crypto/subtle", Opaque: true}, + {Name: "execerrdot", Package: "os/exec"}, + {Name: "fips140", Package: "crypto/fips140", Opaque: true}, + {Name: "gocachehash", Package: "cmd/go"}, + {Name: "gocachetest", Package: "cmd/go"}, + {Name: "gocacheverify", Package: "cmd/go"}, + {Name: "gotestjsonbuildtext", Package: "cmd/go", Changed: 24, Old: "1"}, + {Name: "gotypesalias", Package: "go/types", Changed: 23, Old: "0"}, + {Name: "http2client", Package: "net/http"}, + {Name: "http2debug", Package: "net/http", Opaque: true}, + {Name: "http2server", Package: "net/http"}, + {Name: "httplaxcontentlength", Package: "net/http", Changed: 22, Old: "1"}, + {Name: "httpmuxgo121", Package: "net/http", Changed: 22, Old: "1"}, + {Name: "httpservecontentkeepheaders", Package: "net/http", Changed: 23, Old: "1"}, + {Name: "installgoroot", Package: "go/build"}, + {Name: "jstmpllitinterp", Package: "html/template", Opaque: true}, // bug #66217: remove Opaque + //{Name: "multipartfiles", Package: "mime/multipart"}, + {Name: "multipartmaxheaders", Package: "mime/multipart"}, + {Name: "multipartmaxparts", Package: "mime/multipart"}, + {Name: "multipathtcp", Package: "net", Changed: 24, Old: "0"}, + {Name: "netdns", Package: "net", Opaque: true}, + {Name: "netedns0", Package: "net", Changed: 19, Old: "0"}, + {Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"}, + {Name: "randautoseed", Package: "math/rand"}, + {Name: "randseednop", Package: "math/rand", Changed: 24, Old: "0"}, + {Name: "rsa1024min", Package: "crypto/rsa", Changed: 24, Old: "0"}, + {Name: "tarinsecurepath", Package: "archive/tar"}, + {Name: "tls10server", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "tls3des", Package: "crypto/tls", Changed: 23, Old: "1"}, + {Name: "tlsmaxrsasize", Package: "crypto/tls"}, + {Name: "tlsmlkem", Package: "crypto/tls", Changed: 24, Old: "0", Opaque: true}, + {Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "winreadlinkvolume", Package: "os", Changed: 23, Old: "0"}, + {Name: "winsymlink", Package: "os", Changed: 23, Old: "0"}, + {Name: "x509keypairleaf", Package: "crypto/tls", Changed: 23, Old: "0"}, + {Name: "x509negativeserial", Package: "crypto/x509", Changed: 23, Old: "1"}, + {Name: "x509rsacrt", Package: "crypto/x509", Changed: 24, Old: "0"}, + {Name: "x509usefallbackroots", Package: "crypto/x509"}, + {Name: "x509usepolicies", Package: "crypto/x509", Changed: 24, Old: "0"}, + {Name: "zipinsecurepath", Package: "archive/zip"}, +} + +// Lookup returns the Info with the given name. +func Lookup(name string) *Info { + // binary search, avoiding import of sort. 
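+ // Because All is sorted by Name, the target entry, if present, always
+ // lies in All[lo:hi]; each iteration halves that window until it is
+ // empty or the name is found.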
+ lo := 0 + hi := len(All) + for lo < hi { + m := int(uint(lo+hi) >> 1) + mid := All[m].Name + if name == mid { + return &All[m] + } + if name < mid { + hi = m + } else { + lo = m + 1 + } + } + return nil +} diff --git a/testing/internal/goexperiment/exp_aliastypeparams_off.go b/testing/internal/goexperiment/exp_aliastypeparams_off.go new file mode 100644 index 0000000..620d34e --- /dev/null +++ b/testing/internal/goexperiment/exp_aliastypeparams_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.aliastypeparams + +package goexperiment + +const AliasTypeParams = false +const AliasTypeParamsInt = 0 diff --git a/testing/internal/goexperiment/exp_aliastypeparams_on.go b/testing/internal/goexperiment/exp_aliastypeparams_on.go new file mode 100644 index 0000000..8f6872c --- /dev/null +++ b/testing/internal/goexperiment/exp_aliastypeparams_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.aliastypeparams + +package goexperiment + +const AliasTypeParams = true +const AliasTypeParamsInt = 1 diff --git a/testing/internal/goexperiment/exp_arenas_off.go b/testing/internal/goexperiment/exp_arenas_off.go new file mode 100644 index 0000000..01f5332 --- /dev/null +++ b/testing/internal/goexperiment/exp_arenas_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.arenas + +package goexperiment + +const Arenas = false +const ArenasInt = 0 diff --git a/testing/internal/goexperiment/exp_arenas_on.go b/testing/internal/goexperiment/exp_arenas_on.go new file mode 100644 index 0000000..609dfbc --- /dev/null +++ b/testing/internal/goexperiment/exp_arenas_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.arenas + +package goexperiment + +const Arenas = true +const ArenasInt = 1 diff --git a/testing/internal/goexperiment/exp_boringcrypto_off.go b/testing/internal/goexperiment/exp_boringcrypto_off.go new file mode 100644 index 0000000..de71267 --- /dev/null +++ b/testing/internal/goexperiment/exp_boringcrypto_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.boringcrypto + +package goexperiment + +const BoringCrypto = false +const BoringCryptoInt = 0 diff --git a/testing/internal/goexperiment/exp_boringcrypto_on.go b/testing/internal/goexperiment/exp_boringcrypto_on.go new file mode 100644 index 0000000..ce476fa --- /dev/null +++ b/testing/internal/goexperiment/exp_boringcrypto_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.boringcrypto + +package goexperiment + +const BoringCrypto = true +const BoringCryptoInt = 1 diff --git a/testing/internal/goexperiment/exp_cacheprog_off.go b/testing/internal/goexperiment/exp_cacheprog_off.go new file mode 100644 index 0000000..276855c --- /dev/null +++ b/testing/internal/goexperiment/exp_cacheprog_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.cacheprog + +package goexperiment + +const CacheProg = false +const CacheProgInt = 0 diff --git a/testing/internal/goexperiment/exp_cacheprog_on.go b/testing/internal/goexperiment/exp_cacheprog_on.go new file mode 100644 index 0000000..b959dd6 --- /dev/null +++ b/testing/internal/goexperiment/exp_cacheprog_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. 
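+// mkconsts.go (later in this diff) emits these exp_* files in _off/_on pairs
+// gated by the goexperiment.<name> build tag, so exactly one definition of
+// each constant pair is compiled into any given build.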
+ +//go:build goexperiment.cacheprog + +package goexperiment + +const CacheProg = true +const CacheProgInt = 1 diff --git a/testing/internal/goexperiment/exp_cgocheck2_off.go b/testing/internal/goexperiment/exp_cgocheck2_off.go new file mode 100644 index 0000000..e99ad07 --- /dev/null +++ b/testing/internal/goexperiment/exp_cgocheck2_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.cgocheck2 + +package goexperiment + +const CgoCheck2 = false +const CgoCheck2Int = 0 diff --git a/testing/internal/goexperiment/exp_cgocheck2_on.go b/testing/internal/goexperiment/exp_cgocheck2_on.go new file mode 100644 index 0000000..f6d1790 --- /dev/null +++ b/testing/internal/goexperiment/exp_cgocheck2_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.cgocheck2 + +package goexperiment + +const CgoCheck2 = true +const CgoCheck2Int = 1 diff --git a/testing/internal/goexperiment/exp_coverageredesign_off.go b/testing/internal/goexperiment/exp_coverageredesign_off.go new file mode 100644 index 0000000..2c33177 --- /dev/null +++ b/testing/internal/goexperiment/exp_coverageredesign_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.coverageredesign + +package goexperiment + +const CoverageRedesign = false +const CoverageRedesignInt = 0 diff --git a/testing/internal/goexperiment/exp_coverageredesign_on.go b/testing/internal/goexperiment/exp_coverageredesign_on.go new file mode 100644 index 0000000..3fc6c2f --- /dev/null +++ b/testing/internal/goexperiment/exp_coverageredesign_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.coverageredesign + +package goexperiment + +const CoverageRedesign = true +const CoverageRedesignInt = 1 diff --git a/testing/internal/goexperiment/exp_fieldtrack_off.go b/testing/internal/goexperiment/exp_fieldtrack_off.go new file mode 100644 index 0000000..ccced94 --- /dev/null +++ b/testing/internal/goexperiment/exp_fieldtrack_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.fieldtrack + +package goexperiment + +const FieldTrack = false +const FieldTrackInt = 0 diff --git a/testing/internal/goexperiment/exp_fieldtrack_on.go b/testing/internal/goexperiment/exp_fieldtrack_on.go new file mode 100644 index 0000000..a497567 --- /dev/null +++ b/testing/internal/goexperiment/exp_fieldtrack_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.fieldtrack + +package goexperiment + +const FieldTrack = true +const FieldTrackInt = 1 diff --git a/testing/internal/goexperiment/exp_heapminimum512kib_off.go b/testing/internal/goexperiment/exp_heapminimum512kib_off.go new file mode 100644 index 0000000..d67c5bb --- /dev/null +++ b/testing/internal/goexperiment/exp_heapminimum512kib_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.heapminimum512kib + +package goexperiment + +const HeapMinimum512KiB = false +const HeapMinimum512KiBInt = 0 diff --git a/testing/internal/goexperiment/exp_heapminimum512kib_on.go b/testing/internal/goexperiment/exp_heapminimum512kib_on.go new file mode 100644 index 0000000..2d29c98 --- /dev/null +++ b/testing/internal/goexperiment/exp_heapminimum512kib_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. 
+ +//go:build goexperiment.heapminimum512kib + +package goexperiment + +const HeapMinimum512KiB = true +const HeapMinimum512KiBInt = 1 diff --git a/testing/internal/goexperiment/exp_loopvar_off.go b/testing/internal/goexperiment/exp_loopvar_off.go new file mode 100644 index 0000000..cfede54 --- /dev/null +++ b/testing/internal/goexperiment/exp_loopvar_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.loopvar + +package goexperiment + +const LoopVar = false +const LoopVarInt = 0 diff --git a/testing/internal/goexperiment/exp_loopvar_on.go b/testing/internal/goexperiment/exp_loopvar_on.go new file mode 100644 index 0000000..e158e0a --- /dev/null +++ b/testing/internal/goexperiment/exp_loopvar_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.loopvar + +package goexperiment + +const LoopVar = true +const LoopVarInt = 1 diff --git a/testing/internal/goexperiment/exp_newinliner_off.go b/testing/internal/goexperiment/exp_newinliner_off.go new file mode 100644 index 0000000..d94e736 --- /dev/null +++ b/testing/internal/goexperiment/exp_newinliner_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.newinliner + +package goexperiment + +const NewInliner = false +const NewInlinerInt = 0 diff --git a/testing/internal/goexperiment/exp_newinliner_on.go b/testing/internal/goexperiment/exp_newinliner_on.go new file mode 100644 index 0000000..6777dbc --- /dev/null +++ b/testing/internal/goexperiment/exp_newinliner_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.newinliner + +package goexperiment + +const NewInliner = true +const NewInlinerInt = 1 diff --git a/testing/internal/goexperiment/exp_preemptibleloops_off.go b/testing/internal/goexperiment/exp_preemptibleloops_off.go new file mode 100644 index 0000000..cddcc1b --- /dev/null +++ b/testing/internal/goexperiment/exp_preemptibleloops_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.preemptibleloops + +package goexperiment + +const PreemptibleLoops = false +const PreemptibleLoopsInt = 0 diff --git a/testing/internal/goexperiment/exp_preemptibleloops_on.go b/testing/internal/goexperiment/exp_preemptibleloops_on.go new file mode 100644 index 0000000..7f474c0 --- /dev/null +++ b/testing/internal/goexperiment/exp_preemptibleloops_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.preemptibleloops + +package goexperiment + +const PreemptibleLoops = true +const PreemptibleLoopsInt = 1 diff --git a/testing/internal/goexperiment/exp_rangefunc_off.go b/testing/internal/goexperiment/exp_rangefunc_off.go new file mode 100644 index 0000000..fc02820 --- /dev/null +++ b/testing/internal/goexperiment/exp_rangefunc_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.rangefunc + +package goexperiment + +const RangeFunc = false +const RangeFuncInt = 0 diff --git a/testing/internal/goexperiment/exp_rangefunc_on.go b/testing/internal/goexperiment/exp_rangefunc_on.go new file mode 100644 index 0000000..25e7bd3 --- /dev/null +++ b/testing/internal/goexperiment/exp_rangefunc_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. 
+ +//go:build goexperiment.rangefunc + +package goexperiment + +const RangeFunc = true +const RangeFuncInt = 1 diff --git a/testing/internal/goexperiment/exp_regabiargs_off.go b/testing/internal/goexperiment/exp_regabiargs_off.go new file mode 100644 index 0000000..a8c8536 --- /dev/null +++ b/testing/internal/goexperiment/exp_regabiargs_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.regabiargs + +package goexperiment + +const RegabiArgs = false +const RegabiArgsInt = 0 diff --git a/testing/internal/goexperiment/exp_regabiargs_on.go b/testing/internal/goexperiment/exp_regabiargs_on.go new file mode 100644 index 0000000..def3b94 --- /dev/null +++ b/testing/internal/goexperiment/exp_regabiargs_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.regabiargs + +package goexperiment + +const RegabiArgs = true +const RegabiArgsInt = 1 diff --git a/testing/internal/goexperiment/exp_regabiwrappers_off.go b/testing/internal/goexperiment/exp_regabiwrappers_off.go new file mode 100644 index 0000000..a65ed36 --- /dev/null +++ b/testing/internal/goexperiment/exp_regabiwrappers_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.regabiwrappers + +package goexperiment + +const RegabiWrappers = false +const RegabiWrappersInt = 0 diff --git a/testing/internal/goexperiment/exp_regabiwrappers_on.go b/testing/internal/goexperiment/exp_regabiwrappers_on.go new file mode 100644 index 0000000..d525c9a --- /dev/null +++ b/testing/internal/goexperiment/exp_regabiwrappers_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.regabiwrappers + +package goexperiment + +const RegabiWrappers = true +const RegabiWrappersInt = 1 diff --git a/testing/internal/goexperiment/exp_spinbitmutex_off.go b/testing/internal/goexperiment/exp_spinbitmutex_off.go new file mode 100644 index 0000000..776b0dc --- /dev/null +++ b/testing/internal/goexperiment/exp_spinbitmutex_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.spinbitmutex + +package goexperiment + +const SpinbitMutex = false +const SpinbitMutexInt = 0 diff --git a/testing/internal/goexperiment/exp_spinbitmutex_on.go b/testing/internal/goexperiment/exp_spinbitmutex_on.go new file mode 100644 index 0000000..8468030 --- /dev/null +++ b/testing/internal/goexperiment/exp_spinbitmutex_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.spinbitmutex + +package goexperiment + +const SpinbitMutex = true +const SpinbitMutexInt = 1 diff --git a/testing/internal/goexperiment/exp_staticlockranking_off.go b/testing/internal/goexperiment/exp_staticlockranking_off.go new file mode 100644 index 0000000..5fafff2 --- /dev/null +++ b/testing/internal/goexperiment/exp_staticlockranking_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.staticlockranking + +package goexperiment + +const StaticLockRanking = false +const StaticLockRankingInt = 0 diff --git a/testing/internal/goexperiment/exp_staticlockranking_on.go b/testing/internal/goexperiment/exp_staticlockranking_on.go new file mode 100644 index 0000000..dfd32a8 --- /dev/null +++ b/testing/internal/goexperiment/exp_staticlockranking_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. 
+ +//go:build goexperiment.staticlockranking + +package goexperiment + +const StaticLockRanking = true +const StaticLockRankingInt = 1 diff --git a/testing/internal/goexperiment/exp_swissmap_off.go b/testing/internal/goexperiment/exp_swissmap_off.go new file mode 100644 index 0000000..2af40aa --- /dev/null +++ b/testing/internal/goexperiment/exp_swissmap_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.swissmap + +package goexperiment + +const SwissMap = false +const SwissMapInt = 0 diff --git a/testing/internal/goexperiment/exp_swissmap_on.go b/testing/internal/goexperiment/exp_swissmap_on.go new file mode 100644 index 0000000..73be49b --- /dev/null +++ b/testing/internal/goexperiment/exp_swissmap_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.swissmap + +package goexperiment + +const SwissMap = true +const SwissMapInt = 1 diff --git a/testing/internal/goexperiment/exp_synchashtriemap_off.go b/testing/internal/goexperiment/exp_synchashtriemap_off.go new file mode 100644 index 0000000..cab23aa --- /dev/null +++ b/testing/internal/goexperiment/exp_synchashtriemap_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.synchashtriemap + +package goexperiment + +const SyncHashTrieMap = false +const SyncHashTrieMapInt = 0 diff --git a/testing/internal/goexperiment/exp_synchashtriemap_on.go b/testing/internal/goexperiment/exp_synchashtriemap_on.go new file mode 100644 index 0000000..87433ef --- /dev/null +++ b/testing/internal/goexperiment/exp_synchashtriemap_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.synchashtriemap + +package goexperiment + +const SyncHashTrieMap = true +const SyncHashTrieMapInt = 1 diff --git a/testing/internal/goexperiment/exp_synctest_off.go b/testing/internal/goexperiment/exp_synctest_off.go new file mode 100644 index 0000000..fade13f --- /dev/null +++ b/testing/internal/goexperiment/exp_synctest_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.synctest + +package goexperiment + +const Synctest = false +const SynctestInt = 0 diff --git a/testing/internal/goexperiment/exp_synctest_on.go b/testing/internal/goexperiment/exp_synctest_on.go new file mode 100644 index 0000000..9c44be7 --- /dev/null +++ b/testing/internal/goexperiment/exp_synctest_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.synctest + +package goexperiment + +const Synctest = true +const SynctestInt = 1 diff --git a/testing/internal/goexperiment/flags.go b/testing/internal/goexperiment/flags.go new file mode 100644 index 0000000..948ed5c --- /dev/null +++ b/testing/internal/goexperiment/flags.go @@ -0,0 +1,131 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goexperiment implements support for toolchain experiments. +// +// Toolchain experiments are controlled by the GOEXPERIMENT +// environment variable. GOEXPERIMENT is a comma-separated list of +// experiment names. GOEXPERIMENT can be set at make.bash time, which +// sets the default experiments for binaries built with the tool +// chain; or it can be set at build time. GOEXPERIMENT can also be set +// to "none", which disables any experiments that were enabled at +// make.bash time. 
+//
+// Experiments are exposed to the build in the following ways:
+//
+// - Build tag goexperiment.x is set if experiment x (lower case) is
+// enabled.
+//
+// - For each experiment x (in camel case), this package contains a
+// boolean constant x and an integer constant xInt.
+//
+// - In runtime assembly, the macro GOEXPERIMENT_x is defined if
+// experiment x (lower case) is enabled.
+//
+// In the toolchain, the set of experiments enabled for the current
+// build should be accessed via objabi.Experiment.
+//
+// The set of experiments is included in the output of runtime.Version()
+// and "go version <binary>" if it differs from the default experiments.
+//
+// For the set of experiments supported by the current toolchain, see
+// "go doc goexperiment.Flags".
+//
+// Note that this package defines the set of experiments (in Flags)
+// and records the experiments that were enabled when the package
+// was compiled (as boolean and integer constants).
+//
+// Note especially that this package does not itself change behavior
+// at run time based on the GOEXPERIMENT variable.
+// The code used in builds to interpret the GOEXPERIMENT variable
+// is in the separate package internal/buildcfg.
+package goexperiment
+
+//go:generate go run mkconsts.go
+
+// Flags is the set of experiments that can be enabled or disabled in
+// the current toolchain.
+//
+// When specified in the GOEXPERIMENT environment variable or as build
+// tags, experiments use the strings.ToLower of their field name.
+//
+// For the baseline experimental configuration, see
+// [internal/buildcfg.ParseGOEXPERIMENT].
+//
+// If you change this struct definition, run "go generate".
+type Flags struct {
+	FieldTrack        bool
+	PreemptibleLoops  bool
+	StaticLockRanking bool
+	BoringCrypto      bool
+
+	// Regabi is split into several sub-experiments that can be
+	// enabled individually. Not all combinations work.
+	// The "regabi" GOEXPERIMENT is an alias for all "working"
+	// subexperiments.
+
+	// RegabiWrappers enables ABI wrappers for calling between
+	// ABI0 and ABIInternal functions. Without this, the ABIs are
+	// assumed to be identical so cross-ABI calls are direct.
+	RegabiWrappers bool
+	// RegabiArgs enables register arguments/results in all
+	// compiled Go functions.
+	//
+	// Requires wrappers (to do ABI translation), and reflect (so
+	// reflection calls use registers).
+	RegabiArgs bool
+
+	// HeapMinimum512KiB reduces the minimum heap size to 512 KiB.
+	//
+	// This was originally reduced as part of PacerRedesign, but
+	// has been broken out to its own experiment that is disabled
+	// by default.
+	HeapMinimum512KiB bool
+
+	// CoverageRedesign enables the new compiler-based code coverage
+	// tooling.
+	CoverageRedesign bool
+
+	// Arenas causes the "arena" standard library package to be visible
+	// to the outside world.
+	Arenas bool
+
+	// CgoCheck2 enables an expensive cgo rule checker.
+	// When this experiment is enabled, cgo rule checks occur regardless
+	// of the GODEBUG=cgocheck setting provided at runtime.
+	CgoCheck2 bool
+
+	// LoopVar changes loop semantics so that each iteration gets its own
+	// copy of the iteration variable.
+	LoopVar bool
+
+	// CacheProg adds support to cmd/go to use a child process to implement
+	// the build cache; see https://github.com/golang/go/issues/59719.
+	CacheProg bool
+
+	// NewInliner enables a new+improved version of the function
+	// inlining phase within the Go compiler.
+	NewInliner bool
+
+	// RangeFunc enables range over func.
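+	// For illustration, with this experiment a value of type
+	// func(yield func(int) bool) can be ranged over directly:
+	//
+	//	for x := range seq { ... }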
+ RangeFunc bool + + // AliasTypeParams enables type parameters for alias types. + // Requires that gotypesalias=1 is set with GODEBUG. + // This flag will be removed with Go 1.25. + AliasTypeParams bool + + // SwissMap enables the SwissTable-based map implementation. + SwissMap bool + + // SpinbitMutex enables the new "spinbit" mutex implementation on supported + // platforms. See https://go.dev/issue/68578. + SpinbitMutex bool + + // SyncHashTrieMap enables the HashTrieMap sync.Map implementation. + SyncHashTrieMap bool + + // Synctest enables the testing/synctest package. + Synctest bool +} diff --git a/testing/internal/goexperiment/mkconsts.go b/testing/internal/goexperiment/mkconsts.go new file mode 100644 index 0000000..1a7bc4a --- /dev/null +++ b/testing/internal/goexperiment/mkconsts.go @@ -0,0 +1,73 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// mkconsts generates const definition files for each GOEXPERIMENT. +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "reflect" + "strings" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goexperiment" +) + +func main() { + // Delete existing experiment constant files. + ents, err := os.ReadDir(".") + if err != nil { + log.Fatal(err) + } + for _, ent := range ents { + name := ent.Name() + if !strings.HasPrefix(name, "exp_") { + continue + } + // Check that this is definitely a generated file. + data, err := os.ReadFile(name) + if err != nil { + log.Fatalf("reading %s: %v", name, err) + } + if !bytes.Contains(data, []byte("Code generated by mkconsts")) { + log.Fatalf("%s: expected generated file", name) + } + if err := os.Remove(name); err != nil { + log.Fatal(err) + } + } + + // Generate new experiment constant files. + rt := reflect.TypeOf(&goexperiment.Flags{}).Elem() + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i).Name + buildTag := "goexperiment." + strings.ToLower(f) + for _, val := range []bool{false, true} { + name := fmt.Sprintf("exp_%s_%s.go", strings.ToLower(f), pick(val, "off", "on")) + data := fmt.Sprintf(`// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build %s%s + +package goexperiment + +const %s = %v +const %sInt = %s +`, pick(val, "!", ""), buildTag, f, val, f, pick(val, "0", "1")) + if err := os.WriteFile(name, []byte(data), 0666); err != nil { + log.Fatalf("writing %s: %v", name, err) + } + } + } +} + +func pick(v bool, f, t string) string { + if v { + return t + } + return f +} diff --git a/testing/internal/goos/gengoos.go b/testing/internal/goos/gengoos.go new file mode 100644 index 0000000..e0d4d38 --- /dev/null +++ b/testing/internal/goos/gengoos.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
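+// gengoos scans ../../internal/syslist/syslist.go for the KnownOS table and
+// writes one zgoos_<GOOS>.go file per known OS; it runs via 'go generate'
+// from package goos and is kept out of normal builds by the ignore tag below.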
+ +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" +) + +var gooses []string + +func main() { + data, err := os.ReadFile("../../internal/syslist/syslist.go") + if err != nil { + log.Fatal(err) + } + const goosPrefix = `var KnownOS = map[string]bool{` + inGOOS := false + for _, line := range strings.Split(string(data), "\n") { + if strings.HasPrefix(line, goosPrefix) { + inGOOS = true + } else if inGOOS && strings.HasPrefix(line, "}") { + break + } else if inGOOS { + goos := strings.Fields(line)[0] + goos = strings.TrimPrefix(goos, `"`) + goos = strings.TrimSuffix(goos, `":`) + gooses = append(gooses, goos) + } + } + + for _, target := range gooses { + if target == "nacl" { + continue + } + var tags []string + if target == "linux" { + tags = append(tags, "!android") // must explicitly exclude android for linux + } + if target == "solaris" { + tags = append(tags, "!illumos") // must explicitly exclude illumos for solaris + } + if target == "darwin" { + tags = append(tags, "!ios") // must explicitly exclude ios for darwin + } + tags = append(tags, target) // must explicitly include target for bootstrapping purposes + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n") + fmt.Fprintf(&buf, "//go:build %s\n\n", strings.Join(tags, " && ")) + fmt.Fprintf(&buf, "package goos\n\n") + fmt.Fprintf(&buf, "const GOOS = `%s`\n\n", target) + for _, goos := range gooses { + value := 0 + if goos == target { + value = 1 + } + fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goos), value) + } + err := os.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666) + if err != nil { + log.Fatal(err) + } + } +} diff --git a/testing/internal/goos/goos.go b/testing/internal/goos/goos.go new file mode 100644 index 0000000..02dc968 --- /dev/null +++ b/testing/internal/goos/goos.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package goos contains GOOS-specific constants. +package goos + +// The next line makes 'go generate' write the zgoos*.go files with +// per-OS information, including constants named Is$GOOS for every +// known GOOS. The constant is 1 on the current system, 0 otherwise; +// multiplying by them is useful for defining GOOS-specific constants. +// +//go:generate go run gengoos.go diff --git a/testing/internal/goos/nonunix.go b/testing/internal/goos/nonunix.go new file mode 100644 index 0000000..2ba5c85 --- /dev/null +++ b/testing/internal/goos/nonunix.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package goos + +const IsUnix = false diff --git a/testing/internal/goos/unix.go b/testing/internal/goos/unix.go new file mode 100644 index 0000000..6cfd5ef --- /dev/null +++ b/testing/internal/goos/unix.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package goos + +const IsUnix = true diff --git a/testing/internal/goos/zgoos_aix.go b/testing/internal/goos/zgoos_aix.go new file mode 100644 index 0000000..24e05c9 --- /dev/null +++ b/testing/internal/goos/zgoos_aix.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +//go:build aix + +package goos + +const GOOS = `aix` + +const IsAix = 1 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_android.go b/testing/internal/goos/zgoos_android.go new file mode 100644 index 0000000..3c4a318 --- /dev/null +++ b/testing/internal/goos/zgoos_android.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build android + +package goos + +const GOOS = `android` + +const IsAix = 0 +const IsAndroid = 1 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_darwin.go b/testing/internal/goos/zgoos_darwin.go new file mode 100644 index 0000000..10b1499 --- /dev/null +++ b/testing/internal/goos/zgoos_darwin.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build !ios && darwin + +package goos + +const GOOS = `darwin` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 1 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_dragonfly.go b/testing/internal/goos/zgoos_dragonfly.go new file mode 100644 index 0000000..b92d126 --- /dev/null +++ b/testing/internal/goos/zgoos_dragonfly.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build dragonfly + +package goos + +const GOOS = `dragonfly` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 1 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_freebsd.go b/testing/internal/goos/zgoos_freebsd.go new file mode 100644 index 0000000..f547591 --- /dev/null +++ b/testing/internal/goos/zgoos_freebsd.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build freebsd + +package goos + +const GOOS = `freebsd` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 1 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_hurd.go b/testing/internal/goos/zgoos_hurd.go new file mode 100644 index 0000000..1189d65 --- /dev/null +++ b/testing/internal/goos/zgoos_hurd.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +//go:build hurd + +package goos + +const GOOS = `hurd` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 1 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_illumos.go b/testing/internal/goos/zgoos_illumos.go new file mode 100644 index 0000000..4f02540 --- /dev/null +++ b/testing/internal/goos/zgoos_illumos.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build illumos + +package goos + +const GOOS = `illumos` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 1 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_ios.go b/testing/internal/goos/zgoos_ios.go new file mode 100644 index 0000000..02f3586 --- /dev/null +++ b/testing/internal/goos/zgoos_ios.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build ios + +package goos + +const GOOS = `ios` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 1 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_js.go b/testing/internal/goos/zgoos_js.go new file mode 100644 index 0000000..4818741 --- /dev/null +++ b/testing/internal/goos/zgoos_js.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build js + +package goos + +const GOOS = `js` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 1 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_linux.go b/testing/internal/goos/zgoos_linux.go new file mode 100644 index 0000000..6f4d4e0 --- /dev/null +++ b/testing/internal/goos/zgoos_linux.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build !android && linux + +package goos + +const GOOS = `linux` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 1 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_netbsd.go b/testing/internal/goos/zgoos_netbsd.go new file mode 100644 index 0000000..948603d --- /dev/null +++ b/testing/internal/goos/zgoos_netbsd.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +//go:build netbsd + +package goos + +const GOOS = `netbsd` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 1 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_openbsd.go b/testing/internal/goos/zgoos_openbsd.go new file mode 100644 index 0000000..f4b2014 --- /dev/null +++ b/testing/internal/goos/zgoos_openbsd.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build openbsd + +package goos + +const GOOS = `openbsd` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 1 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_plan9.go b/testing/internal/goos/zgoos_plan9.go new file mode 100644 index 0000000..95572df --- /dev/null +++ b/testing/internal/goos/zgoos_plan9.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build plan9 + +package goos + +const GOOS = `plan9` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 1 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_solaris.go b/testing/internal/goos/zgoos_solaris.go new file mode 100644 index 0000000..c705826 --- /dev/null +++ b/testing/internal/goos/zgoos_solaris.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build !illumos && solaris + +package goos + +const GOOS = `solaris` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 1 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_wasip1.go b/testing/internal/goos/zgoos_wasip1.go new file mode 100644 index 0000000..ae35eeb --- /dev/null +++ b/testing/internal/goos/zgoos_wasip1.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build wasip1 + +package goos + +const GOOS = `wasip1` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 1 +const IsWindows = 0 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_windows.go b/testing/internal/goos/zgoos_windows.go new file mode 100644 index 0000000..f89f4cf --- /dev/null +++ b/testing/internal/goos/zgoos_windows.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +//go:build windows + +package goos + +const GOOS = `windows` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 1 +const IsZos = 0 diff --git a/testing/internal/goos/zgoos_zos.go b/testing/internal/goos/zgoos_zos.go new file mode 100644 index 0000000..29fb0f8 --- /dev/null +++ b/testing/internal/goos/zgoos_zos.go @@ -0,0 +1,26 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +//go:build zos + +package goos + +const GOOS = `zos` + +const IsAix = 0 +const IsAndroid = 0 +const IsDarwin = 0 +const IsDragonfly = 0 +const IsFreebsd = 0 +const IsHurd = 0 +const IsIllumos = 0 +const IsIos = 0 +const IsJs = 0 +const IsLinux = 0 +const IsNacl = 0 +const IsNetbsd = 0 +const IsOpenbsd = 0 +const IsPlan9 = 0 +const IsSolaris = 0 +const IsWasip1 = 0 +const IsWindows = 0 +const IsZos = 1 diff --git a/testing/internal/platform/supported.go b/testing/internal/platform/supported.go new file mode 100644 index 0000000..e864c37 --- /dev/null +++ b/testing/internal/platform/supported.go @@ -0,0 +1,288 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go test . -run=^TestGenerated$ -fix + +package platform + +// An OSArch is a pair of GOOS and GOARCH values indicating a platform. +type OSArch struct { + GOOS, GOARCH string +} + +func (p OSArch) String() string { + return p.GOOS + "/" + p.GOARCH +} + +// RaceDetectorSupported reports whether goos/goarch supports the race +// detector. There is a copy of this function in cmd/dist/test.go. +// Race detector only supports 48-bit VMA on arm64. But it will always +// return true for arm64, because we don't have VMA size information during +// the compile time. +func RaceDetectorSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" || goarch == "s390x" + case "darwin": + return goarch == "amd64" || goarch == "arm64" + case "freebsd", "netbsd", "windows": + return goarch == "amd64" + default: + return false + } +} + +// MSanSupported reports whether goos/goarch supports the memory +// sanitizer option. +func MSanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "arm64" || goarch == "loong64" + case "freebsd": + return goarch == "amd64" + default: + return false + } +} + +// ASanSupported reports whether goos/goarch supports the address +// sanitizer option. +func ASanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "arm64" || goarch == "amd64" || goarch == "loong64" || goarch == "riscv64" || goarch == "ppc64le" + default: + return false + } +} + +// FuzzSupported reports whether goos/goarch supports fuzzing +// ('go test -fuzz=.'). +func FuzzSupported(goos, goarch string) bool { + switch goos { + case "darwin", "freebsd", "linux", "windows": + return true + default: + return false + } +} + +// FuzzInstrumented reports whether fuzzing on goos/goarch uses coverage +// instrumentation. (FuzzInstrumented implies FuzzSupported.) 
+func FuzzInstrumented(goos, goarch string) bool { + switch goarch { + case "amd64", "arm64": + // TODO(#14565): support more architectures. + return FuzzSupported(goos, goarch) + default: + return false + } +} + +// MustLinkExternal reports whether goos/goarch requires external linking +// with or without cgo dependencies. +func MustLinkExternal(goos, goarch string, withCgo bool) bool { + if withCgo { + switch goarch { + case "loong64", "mips", "mipsle", "mips64", "mips64le": + // Internally linking cgo is incomplete on some architectures. + // https://go.dev/issue/14449 + return true + case "arm64": + if goos == "windows" { + // windows/arm64 internal linking is not implemented. + return true + } + case "ppc64": + // Big Endian PPC64 cgo internal linking is not implemented for aix or linux. + // https://go.dev/issue/8912 + if goos == "aix" || goos == "linux" { + return true + } + } + + switch goos { + case "android": + return true + case "dragonfly": + // It seems that on Dragonfly thread local storage is + // set up by the dynamic linker, so internal cgo linking + // doesn't work. Test case is "go test runtime/cgo". + return true + } + } + + switch goos { + case "android": + if goarch != "arm64" { + return true + } + case "ios": + if goarch == "arm64" { + return true + } + } + return false +} + +// BuildModeSupported reports whether goos/goarch supports the given build mode +// using the given compiler. +// There is a copy of this function in cmd/dist/test.go. +func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { + if compiler == "gccgo" { + return true + } + + if _, ok := distInfo[OSArch{goos, goarch}]; !ok { + return false // platform unrecognized + } + + platform := goos + "/" + goarch + switch buildmode { + case "archive": + return true + + case "c-archive": + switch goos { + case "aix", "darwin", "ios", "windows": + return true + case "linux": + switch goarch { + case "386", "amd64", "arm", "armbe", "arm64", "arm64be", "loong64", "ppc64le", "riscv64", "s390x": + // linux/ppc64 not supported because it does + // not support external linking mode yet. + return true + default: + // Other targets do not support -shared, + // per ParseFlags in + // cmd/compile/internal/base/flag.go. + // For c-archive the Go tool passes -shared, + // so that the result is suitable for inclusion + // in a PIE or shared library. 
+ return false + } + case "freebsd": + return goarch == "amd64" + } + return false + + case "c-shared": + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/386", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", + "windows/amd64", "windows/386", "windows/arm64", + "wasip1/wasm": + return true + } + return false + + case "default": + return true + + case "exe": + return true + + case "pie": + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/loong64", "linux/ppc64le", "linux/riscv64", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386", + "freebsd/amd64", + "darwin/amd64", "darwin/arm64", + "ios/amd64", "ios/arm64", + "aix/ppc64", + "openbsd/arm64", + "windows/386", "windows/amd64", "windows/arm", "windows/arm64": + return true + } + return false + + case "shared": + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": + return true + } + return false + + case "plugin": + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le", + "android/amd64", "android/386", + "darwin/amd64", "darwin/arm64", + "freebsd/amd64": + return true + } + return false + + default: + return false + } +} + +func InternalLinkPIESupported(goos, goarch string) bool { + switch goos + "/" + goarch { + case "android/arm64", + "darwin/amd64", "darwin/arm64", + "linux/amd64", "linux/arm64", "linux/ppc64le", + "windows/386", "windows/amd64", "windows/arm", "windows/arm64": + return true + } + return false +} + +// DefaultPIE reports whether goos/goarch produces a PIE binary when using the +// "default" buildmode. On Windows this is affected by -race, +// so force the caller to pass that in to centralize that choice. +func DefaultPIE(goos, goarch string, isRace bool) bool { + switch goos { + case "android", "ios": + return true + case "windows": + if isRace { + // PIE is not supported with -race on windows; + // see https://go.dev/cl/416174. + return false + } + return true + case "darwin": + return true + } + return false +} + +// ExecutableHasDWARF reports whether the linked executable includes DWARF +// symbols on goos/goarch. +func ExecutableHasDWARF(goos, goarch string) bool { + switch goos { + case "plan9", "ios": + return false + } + return true +} + +// osArchInfo describes information about an OSArch extracted from cmd/dist and +// stored in the generated distInfo map. +type osArchInfo struct { + CgoSupported bool + FirstClass bool + Broken bool +} + +// CgoSupported reports whether goos/goarch supports cgo. +func CgoSupported(goos, goarch string) bool { + return distInfo[OSArch{goos, goarch}].CgoSupported +} + +// FirstClass reports whether goos/goarch is considered a “first class” port. +// (See https://go.dev/wiki/PortingPolicy#first-class-ports.) +func FirstClass(goos, goarch string) bool { + return distInfo[OSArch{goos, goarch}].FirstClass +} + +// Broken reports whether goos/goarch is considered a broken port. +// (See https://go.dev/wiki/PortingPolicy#broken-ports.) 
+func Broken(goos, goarch string) bool { + return distInfo[OSArch{goos, goarch}].Broken +} diff --git a/testing/internal/platform/zosarch.go b/testing/internal/platform/zosarch.go new file mode 100644 index 0000000..ebde978 --- /dev/null +++ b/testing/internal/platform/zosarch.go @@ -0,0 +1,116 @@ +// Code generated by go test internal/platform -fix. DO NOT EDIT. + +// To change the information in this file, edit the cgoEnabled and/or firstClass +// maps in cmd/dist/build.go, then run 'go generate internal/platform'. + +package platform + +// List is the list of all valid GOOS/GOARCH combinations, +// including known-broken ports. +var List = []OSArch{ + {"aix", "ppc64"}, + {"android", "386"}, + {"android", "amd64"}, + {"android", "arm"}, + {"android", "arm64"}, + {"darwin", "amd64"}, + {"darwin", "arm64"}, + {"dragonfly", "amd64"}, + {"freebsd", "386"}, + {"freebsd", "amd64"}, + {"freebsd", "arm"}, + {"freebsd", "arm64"}, + {"freebsd", "riscv64"}, + {"illumos", "amd64"}, + {"ios", "amd64"}, + {"ios", "arm64"}, + {"js", "wasm"}, + {"linux", "386"}, + {"linux", "amd64"}, + {"linux", "arm"}, + {"linux", "arm64"}, + {"linux", "loong64"}, + {"linux", "mips"}, + {"linux", "mips64"}, + {"linux", "mips64le"}, + {"linux", "mipsle"}, + {"linux", "ppc64"}, + {"linux", "ppc64le"}, + {"linux", "riscv64"}, + {"linux", "s390x"}, + {"linux", "sparc64"}, + {"netbsd", "386"}, + {"netbsd", "amd64"}, + {"netbsd", "arm"}, + {"netbsd", "arm64"}, + {"openbsd", "386"}, + {"openbsd", "amd64"}, + {"openbsd", "arm"}, + {"openbsd", "arm64"}, + {"openbsd", "mips64"}, + {"openbsd", "ppc64"}, + {"openbsd", "riscv64"}, + {"plan9", "386"}, + {"plan9", "amd64"}, + {"plan9", "arm"}, + {"solaris", "amd64"}, + {"wasip1", "wasm"}, + {"windows", "386"}, + {"windows", "amd64"}, + {"windows", "arm"}, + {"windows", "arm64"}, +} + +var distInfo = map[OSArch]osArchInfo{ + {"aix", "ppc64"}: {CgoSupported: true}, + {"android", "386"}: {CgoSupported: true}, + {"android", "amd64"}: {CgoSupported: true}, + {"android", "arm"}: {CgoSupported: true}, + {"android", "arm64"}: {CgoSupported: true}, + {"darwin", "amd64"}: {CgoSupported: true, FirstClass: true}, + {"darwin", "arm64"}: {CgoSupported: true, FirstClass: true}, + {"dragonfly", "amd64"}: {CgoSupported: true}, + {"freebsd", "386"}: {CgoSupported: true}, + {"freebsd", "amd64"}: {CgoSupported: true}, + {"freebsd", "arm"}: {CgoSupported: true}, + {"freebsd", "arm64"}: {CgoSupported: true}, + {"freebsd", "riscv64"}: {CgoSupported: true}, + {"illumos", "amd64"}: {CgoSupported: true}, + {"ios", "amd64"}: {CgoSupported: true}, + {"ios", "arm64"}: {CgoSupported: true}, + {"js", "wasm"}: {}, + {"linux", "386"}: {CgoSupported: true, FirstClass: true}, + {"linux", "amd64"}: {CgoSupported: true, FirstClass: true}, + {"linux", "arm"}: {CgoSupported: true, FirstClass: true}, + {"linux", "arm64"}: {CgoSupported: true, FirstClass: true}, + {"linux", "loong64"}: {CgoSupported: true}, + {"linux", "mips"}: {CgoSupported: true}, + {"linux", "mips64"}: {CgoSupported: true}, + {"linux", "mips64le"}: {CgoSupported: true}, + {"linux", "mipsle"}: {CgoSupported: true}, + {"linux", "ppc64"}: {}, + {"linux", "ppc64le"}: {CgoSupported: true}, + {"linux", "riscv64"}: {CgoSupported: true}, + {"linux", "s390x"}: {CgoSupported: true}, + {"linux", "sparc64"}: {CgoSupported: true, Broken: true}, + {"netbsd", "386"}: {CgoSupported: true}, + {"netbsd", "amd64"}: {CgoSupported: true}, + {"netbsd", "arm"}: {CgoSupported: true}, + {"netbsd", "arm64"}: {CgoSupported: true}, + {"openbsd", "386"}: {CgoSupported: true}, + 
{"openbsd", "amd64"}: {CgoSupported: true}, + {"openbsd", "arm"}: {CgoSupported: true}, + {"openbsd", "arm64"}: {CgoSupported: true}, + {"openbsd", "mips64"}: {CgoSupported: true, Broken: true}, + {"openbsd", "ppc64"}: {}, + {"openbsd", "riscv64"}: {CgoSupported: true}, + {"plan9", "386"}: {}, + {"plan9", "amd64"}: {}, + {"plan9", "arm"}: {}, + {"solaris", "amd64"}: {CgoSupported: true}, + {"wasip1", "wasm"}: {}, + {"windows", "386"}: {CgoSupported: true, FirstClass: true}, + {"windows", "amd64"}: {CgoSupported: true, FirstClass: true}, + {"windows", "arm"}: {Broken: true}, + {"windows", "arm64"}: {CgoSupported: true}, +} diff --git a/testing/internal/platform/zosarch_test.go b/testing/internal/platform/zosarch_test.go new file mode 100644 index 0000000..baf2317 --- /dev/null +++ b/testing/internal/platform/zosarch_test.go @@ -0,0 +1,110 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package platform_test + +import ( + "bytes" + "encoding/json" + "flag" + "os" + "os/exec" + "testing" + "text/template" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/diff" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +var flagFix = flag.Bool("fix", false, "if true, fix out-of-date generated files") + +// TestGenerated verifies that zosarch.go is up to date, +// or regenerates it if the -fix flag is set. +func TestGenerated(t *testing.T) { + testenv.MustHaveGoRun(t) + + // Here we use 'go run cmd/dist' instead of 'go tool dist' in case the + // installed cmd/dist is stale or missing. We don't want to miss a + // skew in the data due to a stale binary. + cmd := testenv.Command(t, "go", "run", "cmd/dist", "list", "-json", "-broken") + + // cmd/dist requires GOROOT to be set explicitly in the environment. + cmd.Env = append(cmd.Environ(), "GOROOT="+testenv.GOROOT(t)) + + out, err := cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + t.Logf("stderr:\n%s", ee.Stderr) + } + t.Fatalf("%v: %v", cmd, err) + } + + type listEntry struct { + GOOS, GOARCH string + CgoSupported bool + FirstClass bool + Broken bool + } + var entries []listEntry + if err := json.Unmarshal(out, &entries); err != nil { + t.Fatal(err) + } + + tmplOut := new(bytes.Buffer) + tmpl := template.Must(template.New("zosarch").Parse(zosarchTmpl)) + err = tmpl.Execute(tmplOut, entries) + if err != nil { + t.Fatal(err) + } + + cmd = testenv.Command(t, "gofmt") + cmd.Stdin = bytes.NewReader(tmplOut.Bytes()) + want, err := cmd.Output() + if err != nil { + t.Logf("stdin:\n%s", tmplOut.Bytes()) + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + t.Logf("stderr:\n%s", ee.Stderr) + } + t.Fatalf("%v: %v", cmd, err) + } + + got, err := os.ReadFile("zosarch.go") + if err == nil && bytes.Equal(got, want) { + return + } + + if !*flagFix { + if err != nil { + t.Log(err) + } else { + t.Logf("diff:\n%s", diff.Diff("zosarch.go", got, "want", want)) + } + t.Fatalf("zosarch.go is missing or out of date; to regenerate, run\ngo generate internal/platform") + } + + if err := os.WriteFile("zosarch.go", want, 0666); err != nil { + t.Fatal(err) + } +} + +const zosarchTmpl = `// Code generated by go test internal/platform -fix. DO NOT EDIT. + +// To change the information in this file, edit the cgoEnabled and/or firstClass +// maps in cmd/dist/build.go, then run 'go generate internal/platform'. 
+ +package platform + +// List is the list of all valid GOOS/GOARCH combinations, +// including known-broken ports. +var List = []OSArch{ +{{range .}} { {{ printf "%q" .GOOS }}, {{ printf "%q" .GOARCH }} }, +{{end}} +} + +var distInfo = map[OSArch]osArchInfo { +{{range .}} { {{ printf "%q" .GOOS }}, {{ printf "%q" .GOARCH }} }: +{ {{if .CgoSupported}}CgoSupported: true, {{end}}{{if .FirstClass}}FirstClass: true, {{end}}{{if .Broken}} Broken: true, {{end}} }, +{{end}} +} +` diff --git a/testing/internal/race/doc.go b/testing/internal/race/doc.go new file mode 100644 index 0000000..8fa44ce --- /dev/null +++ b/testing/internal/race/doc.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package race contains helper functions for manually instrumenting code for the race detector. + +The runtime package intentionally exports these functions only in the race build; +this package exports them unconditionally but without the "race" build tag they are no-ops. +*/ +package race diff --git a/testing/internal/race/norace.go b/testing/internal/race/norace.go new file mode 100644 index 0000000..346564b --- /dev/null +++ b/testing/internal/race/norace.go @@ -0,0 +1,54 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +import ( + "unsafe" +) + +const Enabled = false + +func Acquire(addr unsafe.Pointer) { +} + +func Release(addr unsafe.Pointer) { +} + +func ReleaseMerge(addr unsafe.Pointer) { +} + +func Disable() { +} + +func Enable() { +} + +func Read(addr unsafe.Pointer) { +} + +func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) { +} + +// func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +// } + +func Write(addr unsafe.Pointer) { +} + +func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) { +} + +// func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +// } + +func ReadRange(addr unsafe.Pointer, len int) { +} + +func WriteRange(addr unsafe.Pointer, len int) { +} + +func Errors() int { return 0 } diff --git a/testing/internal/race/race.go b/testing/internal/race/race.go new file mode 100644 index 0000000..ef54ea5 --- /dev/null +++ b/testing/internal/race/race.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "unsafe" +) + +const Enabled = true + +// Functions below pushed from runtime. 
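+// An illustrative note on the mechanism (hedged, not upstream text): each
+// single-argument //go:linkname directive below marks the body-less
+// declaration that follows it so the toolchain can attach an implementation
+// provided elsewhere -- here, pushed from the runtime -- e.g.:
+//
+//	//go:linkname Acquire
+//	func Acquire(addr unsafe.Pointer) // body supplied by the runtime
+//
+// which is why none of the declarations in this file have function bodies.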
+ +//go:linkname Acquire +func Acquire(addr unsafe.Pointer) + +//go:linkname Release +func Release(addr unsafe.Pointer) + +//go:linkname ReleaseMerge +func ReleaseMerge(addr unsafe.Pointer) + +//go:linkname Disable +func Disable() + +//go:linkname Enable +func Enable() + +//go:linkname Read +func Read(addr unsafe.Pointer) + +//go:linkname ReadPC +func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) + +// //go:linkname ReadObjectPC +// func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname Write +func Write(addr unsafe.Pointer) + +//go:linkname WritePC +func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) + +// //go:linkname WriteObjectPC +// func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + +//go:linkname ReadRange +func ReadRange(addr unsafe.Pointer, len int) + +//go:linkname WriteRange +func WriteRange(addr unsafe.Pointer, len int) + +//go:linkname Errors +func Errors() int diff --git a/testing/internal/runtime/atomic/atomic_386.go b/testing/internal/runtime/atomic/atomic_386.go new file mode 100644 index 0000000..b6cdea6 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_386.go @@ -0,0 +1,125 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Load +//go:linkname Loadp +//go:linkname LoadAcquintptr + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg8(ptr *uint8, new uint8) uint8 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load64(ptr *uint64) uint64 + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). 
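+// An illustrative aside on the NOTE above (not upstream text): And/Or are
+// idempotent, so re-applying the same mask is harmless, while XOR toggles
+// bits and a replayed XOR undoes itself:
+//
+//	Or8(&x, 0x80) // sets the bit
+//	Or8(&x, 0x80) // same state as applying it once
+//
+//	x ^= 0x80 // flips the bit
+//	x ^= 0x80 // flips it back -- not idempotent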
+ +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/testing/internal/runtime/atomic/atomic_386.s b/testing/internal/runtime/atomic/atomic_386.s new file mode 100644 index 0000000..58a56e6 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_386.s @@ -0,0 +1,374 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "funcdata.h" + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-13 + MOVL ptr+0(FP), BX + MOVL old+4(FP), AX + MOVL new+8(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+12(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-21 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-13 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-12 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-12 + JMP ·Xadd(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-8 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-12 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-12 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-20 + JMP ·Xadd64(SB) + +// bool ·Cas64(uint64 *val, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-21 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + MOVL old_lo+4(FP), AX + MOVL old_hi+8(FP), DX + MOVL new_lo+12(FP), BX + MOVL new_hi+16(FP), CX + LOCK + CMPXCHG8B 0(BP) + SETEQ ret+20(FP) + RET + +// bool Casp1(void **p, void *old, void *new) +// Atomically: +// if(*p == old){ +// *p = new; +// return 1; +// }else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-13 + MOVL ptr+0(FP), BX + MOVL old+4(FP), AX + MOVL new+8(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+12(FP) + RET + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL delta+4(FP), AX + MOVL AX, CX + LOCK + XADDL AX, 0(BX) + ADDL CX, AX + MOVL AX, ret+8(FP) + RET + +TEXT ·Xadd64(SB), NOSPLIT, $0-20 + NO_LOCAL_POINTERS + // no XADDQ so use CMPXCHG8B loop + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + // DI:SI = delta + MOVL delta_lo+4(FP), SI + MOVL delta_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +addloop: + // CX:BX = DX:AX (*addr) + DI:SI (delta) + MOVL AX, BX + MOVL DX, CX + ADDL SI, BX + ADCL DI, CX + + // if *addr == DX:AX { + // *addr = CX:BX + // } else { + // DX:AX = *addr + // } + // all in one instruction + LOCK + CMPXCHG8B 0(BP) + + JNZ 
addloop + + // success + // return CX:BX + MOVL BX, ret_lo+12(FP) + MOVL CX, ret_hi+16(FP) + RET + +// uint8 Xchg8(uint8 *ptr, uint8 new) +TEXT ·Xchg8(SB), NOSPLIT, $0-9 + MOVL ptr+0(FP), BX + MOVB new+4(FP), AX + XCHGB AX, 0(BX) + MOVB AX, ret+8(FP) + RET + +TEXT ·Xchg(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL new+4(FP), AX + XCHGL AX, 0(BX) + MOVL AX, ret+8(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-12 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-20 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-12 + JMP ·Xchg(SB) + +TEXT ·Xchg64(SB),NOSPLIT,$0-20 + NO_LOCAL_POINTERS + // no XCHGQ so use CMPXCHG8B loop + MOVL ptr+0(FP), BP + TESTL $7, BP + JZ 2(PC) + CALL ·panicUnaligned(SB) + // CX:BX = new + MOVL new_lo+4(FP), BX + MOVL new_hi+8(FP), CX + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +swaploop: + // if *addr == DX:AX + // *addr = CX:BX + // else + // DX:AX = *addr + // all in one instruction + LOCK + CMPXCHG8B 0(BP) + JNZ swaploop + + // success + // return DX:AX + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), BX + MOVL val+4(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·Store(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), BX + MOVL val+4(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·StoreRel(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-8 + JMP ·Store(SB) + +// uint64 atomicload64(uint64 volatile* addr); +TEXT ·Load64(SB), NOSPLIT, $0-12 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), AX + TESTL $7, AX + JZ 2(PC) + CALL ·panicUnaligned(SB) + MOVQ (AX), M0 + MOVQ M0, ret+4(FP) + EMMS + RET + +// void ·Store64(uint64 volatile* addr, uint64 v); +TEXT ·Store64(SB), NOSPLIT, $0-12 + NO_LOCAL_POINTERS + MOVL ptr+0(FP), AX + TESTL $7, AX + JZ 2(PC) + CALL ·panicUnaligned(SB) + // MOVQ and EMMS were introduced on the Pentium MMX. + MOVQ val+4(FP), M0 + MOVQ M0, (AX) + EMMS + // This is essentially a no-op, but it provides required memory fencing. + // It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2). 
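+	// Illustrative note (not upstream text): the XORL/LOCK XADDL sequence
+	// below atomically adds zero to the word at the top of the stack. The
+	// addition changes nothing, but any LOCK-prefixed read-modify-write is
+	// a full memory barrier on x86, which is what orders the MMX store above.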
+ XORL AX, AX + LOCK + XADDL AX, (SP) + RET + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), AX + MOVB val+4(FP), BX + LOCK + ORB BX, (AX) + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), AX + MOVB val+4(FP), BX + LOCK + ANDB BX, (AX) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-5 + MOVL ptr+0(FP), BX + MOVB val+4(FP), AX + XCHGB AX, 0(BX) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), AX + MOVL val+4(FP), BX + LOCK + ORL BX, (AX) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-8 + MOVL ptr+0(FP), AX + MOVL val+4(FP), BX + LOCK + ANDL BX, (AX) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL val+4(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ANDL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+8(FP) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-12 + MOVL ptr+0(FP), BX + MOVL val+4(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ORL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+8(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-20 + MOVL ptr+0(FP), BP + // DI:SI = v + MOVL val_lo+4(FP), SI + MOVL val_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +casloop: + // CX:BX = DX:AX (*addr) & DI:SI (mask) + MOVL AX, BX + MOVL DX, CX + ANDL SI, BX + ANDL DI, CX + LOCK + CMPXCHG8B 0(BP) + JNZ casloop + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-20 + MOVL ptr+0(FP), BP + // DI:SI = v + MOVL val_lo+4(FP), SI + MOVL val_hi+8(FP), DI + // DX:AX = *addr + MOVL 0(BP), AX + MOVL 4(BP), DX +casloop: + // CX:BX = DX:AX (*addr) | DI:SI (mask) + MOVL AX, BX + MOVL DX, CX + ORL SI, BX + ORL DI, CX + LOCK + CMPXCHG8B 0(BP) + JNZ casloop + MOVL AX, ret_lo+12(FP) + MOVL DX, ret_hi+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-12 + JMP ·And32(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-12 + JMP ·Or32(SB) diff --git a/testing/internal/runtime/atomic/atomic_amd64.go b/testing/internal/runtime/atomic/atomic_amd64.go new file mode 100644 index 0000000..2a2d07e --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_amd64.go @@ -0,0 +1,138 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. 
+// +//go:linkname Load +//go:linkname Loadp +//go:linkname Load64 + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func Load64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg8(ptr *uint8, new uint8) uint8 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// StorepNoWB performs *ptr = val atomically and without a write +// barrier. +// +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/testing/internal/runtime/atomic/atomic_amd64.s b/testing/internal/runtime/atomic/atomic_amd64.s new file mode 100644 index 0000000..d6dc7a3 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_amd64.s @@ -0,0 +1,301 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Note: some of these functions are semantically inlined +// by the compiler (in src/cmd/compile/internal/gc/ssa.go). 
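+// Illustrative note (an assumption, not upstream text): "semantically
+// inlined" means the compiler recognizes calls to these functions and emits
+// the matching machine instructions at the call site (e.g. a Cas call
+// becomes LOCK CMPXCHGL), so the assembly bodies below mostly serve calls
+// the compiler does not intrinsify.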
+ +#include "textflag.h" + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0-17 + MOVQ ptr+0(FP), BX + MOVL old+8(FP), AX + MOVL new+12(FP), CX + LOCK + CMPXCHGL CX, 0(BX) + SETEQ ret+16(FP) + RET + +// bool ·Cas64(uint64 *val, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVQ ptr+0(FP), BX + MOVQ old+8(FP), AX + MOVQ new+16(FP), CX + LOCK + CMPXCHGQ CX, 0(BX) + SETEQ ret+24(FP) + RET + +// bool Casp1(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + MOVQ ptr+0(FP), BX + MOVQ old+8(FP), AX + MOVQ new+16(FP), CX + LOCK + CMPXCHGQ CX, 0(BX) + SETEQ ret+24(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL delta+8(FP), AX + MOVL AX, CX + LOCK + XADDL AX, 0(BX) + ADDL CX, AX + MOVL AX, ret+16(FP) + RET + +// uint64 Xadd64(uint64 volatile *val, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ delta+8(FP), AX + MOVQ AX, CX + LOCK + XADDQ AX, 0(BX) + ADDQ CX, AX + MOVQ AX, ret+16(FP) + RET + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// uint8 Xchg(ptr *uint8, new uint8) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg8(SB), NOSPLIT, $0-17 + MOVQ ptr+0(FP), BX + MOVB new+8(FP), AX + XCHGB AX, 0(BX) + MOVB AX, ret+16(FP) + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL new+8(FP), AX + XCHGL AX, 0(BX) + MOVL AX, ret+16(FP) + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ new+8(FP), AX + XCHGQ AX, 0(BX) + MOVQ AX, ret+16(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), AX + XCHGQ AX, 0(BX) + RET + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), AX + XCHGL AX, 0(BX) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), BX + MOVB val+8(FP), AX + XCHGB AX, 0(BX) + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), AX + XCHGQ AX, 0(BX) + RET + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + 
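+// Illustrative note (not upstream text): under the x86-TSO memory model an
+// ordinary store already has release semantics, which is why the
+// release-ordered StoreRel/StoreRel64/StoreReluintptr entry points simply
+// jump to the plain Store routines. Store itself uses XCHG rather than MOV
+// to get the full sequentially consistent barrier a plain MOV would not
+// provide.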
+TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), AX + MOVB val+8(FP), BX + LOCK + ORB BX, (AX) + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVQ ptr+0(FP), AX + MOVB val+8(FP), BX + LOCK + ANDB BX, (AX) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), AX + MOVL val+8(FP), BX + LOCK + ORL BX, (AX) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVQ ptr+0(FP), AX + MOVL val+8(FP), BX + LOCK + ANDL BX, (AX) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ORL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVQ ptr+0(FP), BX + MOVL val+8(FP), CX +casloop: + MOVL CX, DX + MOVL (BX), AX + ANDL AX, DX + LOCK + CMPXCHGL DX, (BX) + JNZ casloop + MOVL AX, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), CX +casloop: + MOVQ CX, DX + MOVQ (BX), AX + ORQ AX, DX + LOCK + CMPXCHGQ DX, (BX) + JNZ casloop + MOVQ AX, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVQ ptr+0(FP), BX + MOVQ val+8(FP), CX +casloop: + MOVQ CX, DX + MOVQ (BX), AX + ANDQ AX, DX + LOCK + CMPXCHGQ DX, (BX) + JNZ casloop + MOVQ AX, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git a/testing/internal/runtime/atomic/atomic_andor_generic.go b/testing/internal/runtime/atomic/atomic_andor_generic.go new file mode 100644 index 0000000..433ee0b --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_andor_generic.go @@ -0,0 +1,78 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm || wasm + +// Export some functions via linkname to assembly in sync/atomic. 
+//
+//go:linkname And32
+//go:linkname Or32
+//go:linkname And64
+//go:linkname Or64
+//go:linkname Anduintptr
+//go:linkname Oruintptr
+
+package atomic
+
+import _ "unsafe" // For linkname
+
+//go:nosplit
+func And32(ptr *uint32, val uint32) uint32 {
+	for {
+		old := *ptr
+		if Cas(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Or32(ptr *uint32, val uint32) uint32 {
+	for {
+		old := *ptr
+		if Cas(ptr, old, old|val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func And64(ptr *uint64, val uint64) uint64 {
+	for {
+		old := *ptr
+		if Cas64(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Or64(ptr *uint64, val uint64) uint64 {
+	for {
+		old := *ptr
+		if Cas64(ptr, old, old|val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Anduintptr(ptr *uintptr, val uintptr) uintptr {
+	for {
+		old := *ptr
+		if Casuintptr(ptr, old, old&val) {
+			return old
+		}
+	}
+}
+
+//go:nosplit
+func Oruintptr(ptr *uintptr, val uintptr) uintptr {
+	for {
+		old := *ptr
+		if Casuintptr(ptr, old, old|val) {
+			return old
+		}
+	}
+}
diff --git a/testing/internal/runtime/atomic/atomic_andor_test.go b/testing/internal/runtime/atomic/atomic_andor_test.go
new file mode 100644
index 0000000..7516fe2
--- /dev/null
+++ b/testing/internal/runtime/atomic/atomic_andor_test.go
@@ -0,0 +1,249 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(61395): move these tests to atomic_test.go once And/Or have
+// implementations for all architectures.
+package atomic_test
+
+import (
+	"testing"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/runtime/atomic"
+)
+
+func TestAnd32(t *testing.T) {
+	// Basic sanity check.
+	x := uint32(0xffffffff)
+	for i := uint32(0); i < 32; i++ {
+		old := x
+		v := atomic.And32(&x, ^(1 << i))
+		if r := uint32(0xffffffff) << (i + 1); x != r || v != old {
+			t.Fatalf("clearing bit %#x: want %#x, got new %#x and old %#v", uint32(1<<i), r, x, v)
+		}
+	}
+}
+func addrLock(addr *uint64) *spinlock {
+	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
+}
+
+// Atomic add and return new value.
+//
+//go:nosplit
+func Xadd(val *uint32, delta int32) uint32 {
+	for {
+		oval := *val
+		nval := oval + uint32(delta)
+		if Cas(val, oval, nval) {
+			return nval
+		}
+	}
+}
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:nosplit
+func Xchg(addr *uint32, v uint32) uint32 {
+	for {
+		old := *addr
+		if Cas(addr, old, v) {
+			return old
+		}
+	}
+}
+
+//go:noescape
+func Xchg8(addr *uint8, v uint8) uint8
+
+//go:nosplit
+func goXchg8(addr *uint8, v uint8) uint8 {
+	// Align down to 4 bytes and use 32-bit CAS.
+	addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
+	shift := (uintptr(unsafe.Pointer(addr)) & 3) * 8 // little endian
+	word := uint32(v) << shift
+	mask := uint32(0xFF) << shift
+
+	for {
+		old := *addr32 // Read the old 32-bit value
+		// Clear the old 8 bits then insert the new value
+		if Cas(addr32, old, (old&^mask)|word) {
+			// Return the old 8-bit value
+			return uint8((old & mask) >> shift)
+		}
+	}
+}
+
+//go:nosplit
+func Xchguintptr(addr *uintptr, v uintptr) uintptr {
+	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
+}
+
+// Not noescape -- it installs a pointer to addr.
+func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer) + +//go:noescape +func Store(addr *uint32, v uint32) + +//go:noescape +func StoreRel(addr *uint32, v uint32) + +//go:noescape +func StoreReluintptr(addr *uintptr, v uintptr) + +//go:nosplit +func goCas64(addr *uint64, old, new uint64) bool { + if uintptr(unsafe.Pointer(addr))&7 != 0 { + *(*int)(nil) = 0 // crash on unaligned uint64 + } + _ = *addr // if nil, fault before taking the lock + var ok bool + addrLock(addr).lock() + if *addr == old { + *addr = new + ok = true + } + addrLock(addr).unlock() + return ok +} + +//go:nosplit +func goXadd64(addr *uint64, delta int64) uint64 { + if uintptr(unsafe.Pointer(addr))&7 != 0 { + *(*int)(nil) = 0 // crash on unaligned uint64 + } + _ = *addr // if nil, fault before taking the lock + var r uint64 + addrLock(addr).lock() + r = *addr + uint64(delta) + *addr = r + addrLock(addr).unlock() + return r +} + +//go:nosplit +func goXchg64(addr *uint64, v uint64) uint64 { + if uintptr(unsafe.Pointer(addr))&7 != 0 { + *(*int)(nil) = 0 // crash on unaligned uint64 + } + _ = *addr // if nil, fault before taking the lock + var r uint64 + addrLock(addr).lock() + r = *addr + *addr = v + addrLock(addr).unlock() + return r +} + +//go:nosplit +func goLoad64(addr *uint64) uint64 { + if uintptr(unsafe.Pointer(addr))&7 != 0 { + *(*int)(nil) = 0 // crash on unaligned uint64 + } + _ = *addr // if nil, fault before taking the lock + var r uint64 + addrLock(addr).lock() + r = *addr + addrLock(addr).unlock() + return r +} + +//go:nosplit +func goStore64(addr *uint64, v uint64) { + if uintptr(unsafe.Pointer(addr))&7 != 0 { + *(*int)(nil) = 0 // crash on unaligned uint64 + } + _ = *addr // if nil, fault before taking the lock + addrLock(addr).lock() + *addr = v + addrLock(addr).unlock() +} + +//go:noescape +func Or8(addr *uint8, v uint8) + +//go:nosplit +func goOr8(addr *uint8, v uint8) { + // Align down to 4 bytes and use 32-bit CAS. + addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3)) + word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian + for { + old := *addr32 + if Cas(addr32, old, old|word) { + return + } + } +} + +//go:noescape +func And8(addr *uint8, v uint8) + +//go:nosplit +func goAnd8(addr *uint8, v uint8) { + // Align down to 4 bytes and use 32-bit CAS. 
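+	// Illustrative note (not upstream text): the CAS below operates on the
+	// containing aligned 32-bit word, with the three sibling bytes masked to
+	// the identity for AND (kept all ones via word |= ^mask), so only the
+	// addressed byte is modified.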
+ addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3)) + word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian + mask := uint32(0xFF) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian + word |= ^mask + for { + old := *addr32 + if Cas(addr32, old, old&word) { + return + } + } +} + +//go:nosplit +func Or(addr *uint32, v uint32) { + for { + old := *addr + if Cas(addr, old, old|v) { + return + } + } +} + +//go:nosplit +func And(addr *uint32, v uint32) { + for { + old := *addr + if Cas(addr, old, old&v) { + return + } + } +} + +//go:nosplit +func armcas(ptr *uint32, old, new uint32) bool + +//go:noescape +func Load(addr *uint32) uint32 + +// NO go:noescape annotation; *addr escapes if result escapes (#31525) +func Loadp(addr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func Load8(addr *uint8) uint8 + +//go:noescape +func LoadAcq(addr *uint32) uint32 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func Cas64(addr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(addr *uint32, old, new uint32) bool + +//go:noescape +func Xadd64(addr *uint64, delta int64) uint64 + +//go:noescape +func Xchg64(addr *uint64, v uint64) uint64 + +//go:noescape +func Load64(addr *uint64) uint64 + +//go:noescape +func Store8(addr *uint8, v uint8) + +//go:noescape +func Store64(addr *uint64, v uint64) diff --git a/testing/internal/runtime/atomic/atomic_arm.s b/testing/internal/runtime/atomic/atomic_arm.s new file mode 100644 index 0000000..85cee04 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_arm.s @@ -0,0 +1,407 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "go_asm.h" +#include "textflag.h" +#include "funcdata.h" + +// bool armcas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +// +// To implement ·cas in sys_$GOOS_arm.s +// using the native instructions, use: +// +// TEXT ·cas(SB),NOSPLIT,$0 +// B ·armcas(SB) +// +TEXT ·armcas(SB),NOSPLIT,$0-13 + MOVW ptr+0(FP), R1 + MOVW old+4(FP), R2 + MOVW new+8(FP), R3 +casl: + LDREX (R1), R0 + CMP R0, R2 + BNE casfail + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $0, R11 + BEQ 2(PC) +#endif + DMB MB_ISHST + + STREX R3, (R1), R0 + CMP $0, R0 + BNE casl + MOVW $1, R0 + +#ifndef GOARM_7 + CMP $0, R11 + BEQ 2(PC) +#endif + DMB MB_ISH + + MOVB R0, ret+12(FP) + RET +casfail: + MOVW $0, R0 + MOVB R0, ret+12(FP) + RET + +// stubs + +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-8 + B ·Load(SB) + +TEXT ·Casint32(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$-4-21 + B ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Casp1(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·CasRel(SB),NOSPLIT,$0-13 + B ·Cas(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-8 + B ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$-4-12 + B ·Load64(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 + B ·Load(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-8 + B ·Load(SB) + +TEXT ·Storeint32(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-12 + B ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StoreRel(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 + B ·Store(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-12 + B ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$-4-20 + B ·Xadd64(SB) + +TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 + B ·Xadd(SB) + +TEXT ·Xchgint32(SB),NOSPLIT,$0-12 + B ·Xchg(SB) + +TEXT ·Xchgint64(SB),NOSPLIT,$-4-20 + B ·Xchg64(SB) + +// 64-bit atomics +// The native ARM implementations use LDREXD/STREXD, which are +// available on ARMv6k or later. We use them only on ARMv7. +// On older ARM, we use Go implementations which simulate 64-bit +// atomics with locks. 
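+// An illustrative sketch of the LDREXD/STREXD pattern used below, in Go-like
+// pseudocode with hypothetical loadExclusive/storeExclusive names for the
+// paired instructions (barriers elided):
+//
+//	for {
+//		old := loadExclusive(addr) // LDREXD: read value, arm the exclusive monitor
+//		if old != expected {
+//			return false
+//		}
+//		if storeExclusive(addr, new) { // STREXD: writes only if the monitor
+//			return true            // is still held; otherwise retry
+//		}
+//	}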
+TEXT armCas64<>(SB),NOSPLIT,$0-21 + // addr is already in R1 + MOVW old_lo+4(FP), R2 + MOVW old_hi+8(FP), R3 + MOVW new_lo+12(FP), R4 + MOVW new_hi+16(FP), R5 +cas64loop: + LDREXD (R1), R6 // loads R6 and R7 + CMP R2, R6 + BNE cas64fail + CMP R3, R7 + BNE cas64fail + + DMB MB_ISHST + + STREXD R4, (R1), R0 // stores R4 and R5 + CMP $0, R0 + BNE cas64loop + MOVW $1, R0 + + DMB MB_ISH + + MOVBU R0, swapped+20(FP) + RET +cas64fail: + MOVW $0, R0 + MOVBU R0, swapped+20(FP) + RET + +TEXT armXadd64<>(SB),NOSPLIT,$0-20 + // addr is already in R1 + MOVW delta_lo+4(FP), R2 + MOVW delta_hi+8(FP), R3 + +add64loop: + LDREXD (R1), R4 // loads R4 and R5 + ADD.S R2, R4 + ADC R3, R5 + + DMB MB_ISHST + + STREXD R4, (R1), R0 // stores R4 and R5 + CMP $0, R0 + BNE add64loop + + DMB MB_ISH + + MOVW R4, new_lo+12(FP) + MOVW R5, new_hi+16(FP) + RET + +TEXT armXchg64<>(SB),NOSPLIT,$0-20 + // addr is already in R1 + MOVW new_lo+4(FP), R2 + MOVW new_hi+8(FP), R3 + +swap64loop: + LDREXD (R1), R4 // loads R4 and R5 + + DMB MB_ISHST + + STREXD R2, (R1), R0 // stores R2 and R3 + CMP $0, R0 + BNE swap64loop + + DMB MB_ISH + + MOVW R4, old_lo+12(FP) + MOVW R5, old_hi+16(FP) + RET + +TEXT armLoad64<>(SB),NOSPLIT,$0-12 + // addr is already in R1 + + LDREXD (R1), R2 // loads R2 and R3 + DMB MB_ISH + + MOVW R2, val_lo+4(FP) + MOVW R3, val_hi+8(FP) + RET + +TEXT armStore64<>(SB),NOSPLIT,$0-12 + // addr is already in R1 + MOVW val_lo+4(FP), R2 + MOVW val_hi+8(FP), R3 + +store64loop: + LDREXD (R1), R4 // loads R4 and R5 + + DMB MB_ISHST + + STREXD R2, (R1), R0 // stores R2 and R3 + CMP $0, R0 + BNE store64loop + + DMB MB_ISH + RET + +TEXT armAnd8<>(SB),NOSPLIT,$0-5 + // addr is already in R1 + MOVB v+4(FP), R2 + +and8loop: + LDREXB (R1), R6 + + DMB MB_ISHST + + AND R2, R6 + STREXB R6, (R1), R0 + CMP $0, R0 + BNE and8loop + + DMB MB_ISH + + RET + +TEXT armOr8<>(SB),NOSPLIT,$0-5 + // addr is already in R1 + MOVB v+4(FP), R2 + +or8loop: + LDREXB (R1), R6 + + DMB MB_ISHST + + ORR R2, R6 + STREXB R6, (R1), R0 + CMP $0, R0 + BNE or8loop + + DMB MB_ISH + + RET + +TEXT armXchg8<>(SB),NOSPLIT,$0-9 + // addr is already in R1 + MOVB v+4(FP), R2 +xchg8loop: + LDREXB (R1), R6 + + DMB MB_ISHST + + STREXB R2, (R1), R0 + CMP $0, R0 + BNE xchg8loop + + DMB MB_ISH + + MOVB R6, ret+8(FP) + RET + +// The following functions all panic if their address argument isn't +// 8-byte aligned. Since we're calling back into Go code to do this, +// we have to cooperate with stack unwinding. In the normal case, the +// functions tail-call into the appropriate implementation, which +// means they must not open a frame. Hence, when they go down the +// panic path, at that point they push the LR to create a real frame +// (they don't need to pop it because panic won't return; however, we +// do need to set the SP delta back). + +// Check if R1 is 8-byte aligned, panic if not. +// Clobbers R2. 
+#define CHECK_ALIGN \ + AND.S $7, R1, R2 \ + BEQ 4(PC) \ + MOVW.W R14, -4(R13) /* prepare a real frame */ \ + BL ·panicUnaligned(SB) \ + ADD $4, R13 /* compensate SP delta */ + +TEXT ·Cas64(SB),NOSPLIT,$-4-21 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goCas64(SB) +#endif + JMP armCas64<>(SB) + +TEXT ·Xadd64(SB),NOSPLIT,$-4-20 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goXadd64(SB) +#endif + JMP armXadd64<>(SB) + +TEXT ·Xchg64(SB),NOSPLIT,$-4-20 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goXchg64(SB) +#endif + JMP armXchg64<>(SB) + +TEXT ·Load64(SB),NOSPLIT,$-4-12 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goLoad64(SB) +#endif + JMP armLoad64<>(SB) + +TEXT ·Store64(SB),NOSPLIT,$-4-12 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + CHECK_ALIGN + +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goStore64(SB) +#endif + JMP armStore64<>(SB) + +TEXT ·And8(SB),NOSPLIT,$-4-5 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + +// Uses STREXB/LDREXB that is armv6k or later. +// For simplicity we only enable this on armv7. +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goAnd8(SB) +#endif + JMP armAnd8<>(SB) + +TEXT ·Or8(SB),NOSPLIT,$-4-5 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + +// Uses STREXB/LDREXB that is armv6k or later. +// For simplicity we only enable this on armv7. +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goOr8(SB) +#endif + JMP armOr8<>(SB) + +TEXT ·Xchg8(SB),NOSPLIT,$-4-9 + NO_LOCAL_POINTERS + MOVW addr+0(FP), R1 + + // Uses STREXB/LDREXB that is armv6k or later. + // For simplicity we only enable this on armv7. +#ifndef GOARM_7 + MOVB internal∕cpu·ARM+const_offsetARMHasV7Atomics(SB), R11 + CMP $1, R11 + BEQ 2(PC) + JMP ·goXchg8(SB) +#endif + JMP armXchg8<>(SB) diff --git a/testing/internal/runtime/atomic/atomic_arm64.go b/testing/internal/runtime/atomic/atomic_arm64.go new file mode 100644 index 0000000..53074c2 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_arm64.go @@ -0,0 +1,116 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build arm64 + +package atomic + +import ( + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" +) + +const ( + offsetARM64HasATOMICS = unsafe.Offsetof(cpu.ARM64.HasATOMICS) +) + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg8(ptr *uint8, new uint8) uint8 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(addr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/testing/internal/runtime/atomic/atomic_arm64.s b/testing/internal/runtime/atomic/atomic_arm64.s new file mode 100644 index 0000000..09f3b53 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_arm64.s @@ -0,0 +1,491 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
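+
+// The read-modify-write operations in this file have two code paths: an
+// ARMv8.1 LSE instruction (SWPAL*, CASAL*, LDADDAL*, LDCLRAL*, LDORAL*)
+// and an LDAXR/STLXR load-store loop taken when internal/cpu reports that
+// the CPU lacks LSE atomics. When built with GOARM64_LSE the runtime
+// check is compiled out and only the LSE path remains.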
+ +#include "go_asm.h" +#include "textflag.h" + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + B ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + B ·Cas(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + B ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + B ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + B ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + B ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + B ·Xadd64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + B ·Xadd64(SB) + +TEXT ·Casp1(SB), NOSPLIT, $0-25 + B ·Cas64(SB) + +// uint32 ·Load(uint32 volatile* addr) +TEXT ·Load(SB),NOSPLIT,$0-12 + MOVD ptr+0(FP), R0 + LDARW (R0), R0 + MOVW R0, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* addr) +TEXT ·Load8(SB),NOSPLIT,$0-9 + MOVD ptr+0(FP), R0 + LDARB (R0), R0 + MOVB R0, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* addr) +TEXT ·Load64(SB),NOSPLIT,$0-16 + MOVD ptr+0(FP), R0 + LDAR (R0), R0 + MOVD R0, ret+8(FP) + RET + +// void *·Loadp(void *volatile *addr) +TEXT ·Loadp(SB),NOSPLIT,$0-16 + MOVD ptr+0(FP), R0 + LDAR (R0), R0 + MOVD R0, ret+8(FP) + RET + +// uint32 ·LoadAcq(uint32 volatile* addr) +TEXT ·LoadAcq(SB),NOSPLIT,$0-12 + B ·Load(SB) + +// uint64 ·LoadAcquintptr(uint64 volatile* addr) +TEXT ·LoadAcq64(SB),NOSPLIT,$0-16 + B ·Load64(SB) + +// uintptr ·LoadAcq64(uintptr volatile* addr) +TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16 + B ·Load64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + B ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + B ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 + STLRW R1, (R0) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R0 + MOVB val+8(FP), R1 + STLRB R1, (R0) + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), R1 + STLR R1, (R0) + RET + +// uint8 Xchg(ptr *uint8, new uint8) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg8(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R0 + MOVB new+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + SWPALB R1, (R0), R2 + MOVB R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRB (R0), R2 + STLXRB R1, (R0), R3 + CBNZ R3, load_store_loop + MOVB R2, ret+16(FP) + RET +#endif + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW new+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + SWPALW R1, (R0), R2 + MOVW R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + STLXRW R1, (R0), R3 + CBNZ R3, load_store_loop + MOVW R2, ret+16(FP) + RET +#endif + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD new+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + SWPALD R1, (R0), R2 + MOVD 
R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + STLXR R1, (R0), R3 + CBNZ R3, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// bool Cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R0 + MOVW old+8(FP), R1 + MOVW new+12(FP), R2 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MOVD R1, R3 + CASALW R3, (R0), R2 + CMP R1, R3 + CSET EQ, R0 + MOVB R0, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R3 + CMPW R1, R3 + BNE ok + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop +ok: + CSET EQ, R0 + MOVB R0, ret+16(FP) + RET +#endif + +// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVD ptr+0(FP), R0 + MOVD old+8(FP), R1 + MOVD new+16(FP), R2 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MOVD R1, R3 + CASALD R3, (R0), R2 + CMP R1, R3 + CSET EQ, R0 + MOVB R0, ret+24(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R3 + CMP R1, R3 + BNE ok + STLXR R2, (R0), R3 + CBNZ R3, load_store_loop +ok: + CSET EQ, R0 + MOVB R0, ret+24(FP) + RET +#endif + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW delta+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDADDALW R1, (R0), R2 + ADD R1, R2 + MOVW R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ADDW R2, R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + MOVW R2, ret+16(FP) + RET +#endif + +// uint64 Xadd64(uint64 volatile *ptr, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD delta+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDADDALD R1, (R0), R2 + ADD R1, R2 + MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + ADD R2, R1, R2 + STLXR R2, (R0), R3 + CBNZ R3, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + B ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + B ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + B ·Xchg64(SB) + +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R0 + MOVB val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALB R2, (R0), R3 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRB (R0), R2 + AND R1, R2 + STLXRB R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R0 + MOVB val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALB R1, (R0), R2 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRB (R0), R2 + ORR R1, R2 + STLXRB R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU 
internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALW R2, (R0), R3 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + AND R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALW R1, (R0), R2 + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ORR R1, R2 + STLXRW R2, (R0), R3 + CBNZ R3, load_store_loop + RET +#endif + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALW R1, (R0), R2 + MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + ORR R1, R2, R3 + STLXRW R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R0 + MOVW val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALW R2, (R0), R3 + MOVD R3, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXRW (R0), R2 + AND R1, R2, R3 + STLXRW R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + LDORALD R1, (R0), R2 + MOVD R2, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + ORR R1, R2, R3 + STLXR R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R0 + MOVD val+8(FP), R1 +#ifndef GOARM64_LSE + MOVBU internal∕cpu·ARM64+const_offsetARM64HasATOMICS(SB), R4 + CBZ R4, load_store_loop +#endif + MVN R1, R2 + LDCLRALD R2, (R0), R3 + MOVD R3, ret+16(FP) + RET +#ifndef GOARM64_LSE +load_store_loop: + LDAXR (R0), R2 + AND R1, R2, R3 + STLXR R3, (R0), R4 + CBNZ R4, load_store_loop + MOVD R2, ret+16(FP) + RET +#endif + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + B ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + B ·Or64(SB) diff --git a/testing/internal/runtime/atomic/atomic_loong64.go b/testing/internal/runtime/atomic/atomic_loong64.go new file mode 100644 index 0000000..b11daa5 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_loong64.go @@ -0,0 +1,119 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
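+
+// The offsets below are exported to the assembly in this package via
+// go_asm.h (as const_offsetLOONG64HasLAMCAS and const_offsetLoong64HasLAM_BH);
+// atomic_loong64.s tests them at run time to choose between the AMCAS and
+// AMSWAPDBB fast paths and the LL/SC fallbacks.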
+ +//go:build loong64 + +package atomic + +import ( + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" +) + +const ( + offsetLOONG64HasLAMCAS = unsafe.Offsetof(cpu.Loong64.HasLAMCAS) + offsetLoong64HasLAM_BH = unsafe.Offsetof(cpu.Loong64.HasLAM_BH) +) + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg8(ptr *uint8, new uint8) uint8 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/testing/internal/runtime/atomic/atomic_loong64.s b/testing/internal/runtime/atomic/atomic_loong64.s new file mode 100644 index 0000000..1f16ad1 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_loong64.s @@ -0,0 +1,380 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
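+
+// Barrier hints used throughout this file: DBAR $0x14 serves as a
+// LoadAcquire barrier and DBAR $0x12 as a StoreRelease barrier, per the
+// comments on their first uses in Cas.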
+ +#include "go_asm.h" +#include "textflag.h" + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*ptr == old){ +// *ptr = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVV ptr+0(FP), R4 + MOVW old+8(FP), R5 + MOVW new+12(FP), R6 + + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLAMCAS(SB), R8 + BEQ R8, cas_again + MOVV R5, R7 // backup old value + AMCASDBW R6, (R4), R5 + BNE R7, R5, cas_fail0 + MOVV $1, R4 + MOVB R4, ret+16(FP) + RET +cas_fail0: + MOVB R0, ret+16(FP) + RET + + // Implemented using the ll-sc instruction pair + DBAR $0x14 // LoadAcquire barrier +cas_again: + MOVV R6, R7 + LL (R4), R8 + BNE R5, R8, cas_fail1 + SC R7, (R4) + BEQ R7, cas_again + MOVV $1, R4 + MOVB R4, ret+16(FP) + DBAR $0x12 // StoreRelease barrier + RET +cas_fail1: + MOVV $0, R4 + JMP -4(PC) + +// bool cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*ptr == old){ +// *ptr = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVV ptr+0(FP), R4 + MOVV old+8(FP), R5 + MOVV new+16(FP), R6 + + MOVBU internal∕cpu·Loong64+const_offsetLOONG64HasLAMCAS(SB), R8 + BEQ R8, cas64_again + MOVV R5, R7 // backup old value + AMCASDBV R6, (R4), R5 + BNE R7, R5, cas64_fail0 + MOVV $1, R4 + MOVB R4, ret+24(FP) + RET +cas64_fail0: + MOVB R0, ret+24(FP) + RET + + // Implemented using the ll-sc instruction pair + DBAR $0x14 +cas64_again: + MOVV R6, R7 + LLV (R4), R8 + BNE R5, R8, cas64_fail1 + SCV R7, (R4) + BEQ R7, cas64_again + MOVV $1, R4 + MOVB R4, ret+24(FP) + DBAR $0x12 + RET +cas64_fail1: + MOVV $0, R4 + JMP -4(PC) + +TEXT ·Casint32(SB),NOSPLIT,$0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +// uint32 Xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW delta+8(FP), R5 + AMADDDBW R5, (R4), R6 + ADDV R6, R5, R4 + MOVW R4, ret+16(FP) + RET + +// func Xadd64(ptr *uint64, delta int64) uint64 +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV delta+8(FP), R5 + AMADDDBV R5, (R4), R6 + ADDV R6, R5, R4 + MOVV R4, ret+16(FP) + RET + +// uint8 Xchg8(ptr *uint8, new uint8) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg8(SB), NOSPLIT, $0-17 + MOVV ptr+0(FP), R4 + MOVBU new+8(FP), R5 + + // R6 = ((ptr & 3) * 8) + AND $3, R4, R6 + SLLV $3, R6 + + // R7 = ((0xFF) << R6) ^ (-1) + MOVV $0xFF, R8 + SLLV R6, R8, R7 + XOR $-1, R7 + + // R4 = ptr & (~3) + MOVV $~3, R8 + AND R8, R4 + + // R5 = ((val) << R6) + SLLV R6, R5 + + DBAR $0x14 // LoadAcquire barrier +_xchg8_again: + LL (R4), R8 + MOVV R8, R9 // backup old val + AND R7, R8 + OR R5, R8 + SC R8, (R4) + BEQ R8, _xchg8_again + DBAR $0x12 // StoreRelease barrier + SRLV R6, R9, R9 + MOVBU R9, ret+16(FP) + RET + +// 
func Xchg(ptr *uint32, new uint32) uint32 +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW new+8(FP), R5 + AMSWAPDBW R5, (R4), R6 + MOVW R6, ret+16(FP) + RET + +// func Xchg64(ptr *uint64, new uint64) uint64 +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV new+8(FP), R5 + AMSWAPDBV R5, (R4), R6 + MOVV R6, ret+16(FP) + RET + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +// func Xchgint32(ptr *int32, new int32) int32 +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +// func Xchgint64(ptr *int64, new int64) int64 +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + AMSWAPDBW R5, (R4), R0 + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVB val+8(FP), R5 + MOVBU internal∕cpu·Loong64+const_offsetLoong64HasLAM_BH(SB), R6 + BEQ R6, _legacy_store8_ + AMSWAPDBB R5, (R4), R0 + RET +_legacy_store8_: + // StoreRelease barrier + DBAR $0x12 + MOVB R5, 0(R4) + DBAR $0x18 + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + AMSWAPDBV R5, (R4), R0 + RET + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVBU val+8(FP), R5 + // R6 = ptr & (~3) + MOVV $~3, R6 + AND R4, R6 + // R7 = ((ptr & 3) * 8) + AND $3, R4, R7 + SLLV $3, R7 + // R5 = val << R7 + SLLV R7, R5 + AMORDBW R5, (R6), R0 + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R4 + MOVBU val+8(FP), R5 + // R6 = ptr & (~3) + MOVV $~3, R6 + AND R4, R6 + // R7 = ((ptr & 3) * 8) + AND $3, R4, R7 + SLLV $3, R7 + // R5 = ((val ^ 0xFF) << R7) ^ (-1) + XOR $255, R5 + SLLV R7, R5 + XOR $-1, R5 + AMANDDBW R5, (R6), R0 + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + AMORDBW R5, (R4), R0 + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + AMANDDBW R5, (R4), R0 + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + AMORDBW R5, (R4), R6 + MOVW R6, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + AMANDDBW R5, (R4), R6 + MOVW R6, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + AMORDBV R5, (R4), R6 + MOVV R6, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + AMANDDBV R5, (R4), R6 + MOVV R6, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) + +// uint32 internal∕runtime∕atomic·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOVV ptr+0(FP), R19 + MOVWU 0(R19), R19 + DBAR $0x14 // LoadAcquire barrier + MOVW R19, ret+8(FP) + RET + +// uint8 internal∕runtime∕atomic·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOVV ptr+0(FP), R19 + MOVBU 0(R19), R19 + DBAR $0x14 
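+	// LoadAcquire barrier (same hint as in Load above)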
+ MOVB R19, ret+8(FP) + RET + +// uint64 internal∕runtime∕atomic·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R19 + MOVV 0(R19), R19 + DBAR $0x14 + MOVV R19, ret+8(FP) + RET + +// void *internal∕runtime∕atomic·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +// uint32 internal∕runtime∕atomic·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP ·Load(SB) + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +// uintptr ·LoadAcquintptr(uintptr volatile* ptr) +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) diff --git a/testing/internal/runtime/atomic/atomic_mips64x.go b/testing/internal/runtime/atomic/atomic_mips64x.go new file mode 100644 index 0000000..f434c93 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_mips64x.go @@ -0,0 +1,107 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package atomic + +import "unsafe" + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/testing/internal/runtime/atomic/atomic_mips64x.s b/testing/internal/runtime/atomic/atomic_mips64x.s new file mode 100644 index 0000000..7b0e080 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_mips64x.s @@ -0,0 +1,423 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +#include "textflag.h" + +#define SYNC WORD $0xf + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVV ptr+0(FP), R1 + MOVW old+8(FP), R2 + MOVW new+12(FP), R5 + SYNC +cas_again: + MOVV R5, R3 + LL (R1), R4 + BNE R2, R4, cas_fail + SC R3, (R1) + BEQ R3, cas_again + MOVV $1, R1 + MOVB R1, ret+16(FP) + SYNC + RET +cas_fail: + MOVV $0, R1 + JMP -4(PC) + +// bool cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVV ptr+0(FP), R1 + MOVV old+8(FP), R2 + MOVV new+16(FP), R5 + SYNC +cas64_again: + MOVV R5, R3 + LLV (R1), R4 + BNE R2, R4, cas64_fail + SCV R3, (R1) + BEQ R3, cas64_again + MOVV $1, R1 + MOVB R1, ret+24(FP) + SYNC + RET +cas64_fail: + MOVV $0, R1 + JMP -4(PC) + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + JMP ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R2 + MOVW delta+8(FP), R3 + SYNC + LL (R2), R1 + ADDU R1, R3, R4 + MOVV R4, R1 + SC R4, (R2) + BEQ R4, -4(PC) + MOVW R1, ret+16(FP) + SYNC + RET + +// uint64 Xadd64(uint64 volatile *ptr, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R2 + MOVV delta+8(FP), R3 + SYNC + LLV (R2), R1 + ADDVU R1, R3, R4 + MOVV R4, R1 + SCV R4, (R2) + BEQ R4, -4(PC) + MOVV R1, ret+16(FP) + SYNC + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R2 + MOVW new+8(FP), R5 + + SYNC + MOVV R5, R3 + LL (R2), R1 + SC R3, (R2) + BEQ R3, -3(PC) + MOVW R1, ret+16(FP) + SYNC + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R2 + MOVV new+8(FP), R5 + + SYNC + MOVV R5, R3 + LLV (R2), R1 + SCV R3, (R2) + BEQ R3, -3(PC) + MOVV R1, ret+16(FP) + SYNC + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT 
·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + SYNC + MOVW R2, 0(R1) + SYNC + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVB val+8(FP), R2 + SYNC + MOVB R2, 0(R1) + SYNC + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVV ptr+0(FP), R1 + MOVV val+8(FP), R2 + SYNC + MOVV R2, 0(R1) + SYNC + RET + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVBU val+8(FP), R2 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R3 + AND R1, R3 + // Compute val shift. +#ifdef GOARCH_mips64 + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + // R4 = ((ptr & 3) * 8) + AND $3, R1, R4 + SLLV $3, R4 + // Shift val for aligned ptr. R2 = val << R4 + SLLV R4, R2 + + SYNC + LL (R3), R4 + OR R2, R4 + SC R4, (R3) + BEQ R4, -4(PC) + SYNC + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVV ptr+0(FP), R1 + MOVBU val+8(FP), R2 + // Align ptr down to 4 bytes so we can use 32-bit load/store. + MOVV $~3, R3 + AND R1, R3 + // Compute val shift. +#ifdef GOARCH_mips64 + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + // R4 = ((ptr & 3) * 8) + AND $3, R1, R4 + SLLV $3, R4 + // Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4) + MOVV $0xFF, R5 + SLLV R4, R2 + SLLV R4, R5 + NOR R0, R5 + OR R5, R2 + + SYNC + LL (R3), R4 + AND R2, R4 + SC R4, (R3) + BEQ R4, -4(PC) + SYNC + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3, R4 + SC R4, (R1) + BEQ R4, -3(PC) + SYNC + MOVW R3, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R1 + MOVW val+8(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3, R4 + SC R4, (R1) + BEQ R4, -3(PC) + SYNC + MOVW R3, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R1 + MOVV val+8(FP), R2 + + SYNC + LLV (R1), R3 + OR R2, R3, R4 + SCV R4, (R1) + BEQ R4, -3(PC) + SYNC + MOVV R3, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R1 + MOVV val+8(FP), R2 + + SYNC + LLV (R1), R3 + AND R2, R3, R4 + SCV R4, (R1) + BEQ R4, -3(PC) + SYNC + MOVV R3, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) + +// uint32 ·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOVV ptr+0(FP), R1 + SYNC + MOVWU 0(R1), R1 + SYNC + MOVW R1, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOVV ptr+0(FP), R1 + SYNC + MOVBU 0(R1), R1 + SYNC + MOVB R1, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R1 + SYNC + MOVV 0(R1), R1 + SYNC + MOVV R1, ret+8(FP) + RET + +// 
void *·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 + MOVV ptr+0(FP), R1 + SYNC + MOVV 0(R1), R1 + SYNC + MOVV R1, ret+8(FP) + RET + +// uint32 ·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP atomic·Load(SB) + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP atomic·Load64(SB) + +// uintptr ·LoadAcquintptr(uintptr volatile* ptr) +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP atomic·Load64(SB) diff --git a/testing/internal/runtime/atomic/atomic_mipsx.go b/testing/internal/runtime/atomic/atomic_mipsx.go new file mode 100644 index 0000000..6e625ca --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_mipsx.go @@ -0,0 +1,197 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Xadd64 +//go:linkname Xchg64 +//go:linkname Cas64 +//go:linkname Load64 +//go:linkname Store64 +//go:linkname Or64 +//go:linkname And64 + +package atomic + +import ( + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" +) + +// TODO implement lock striping +var lock struct { + state uint32 + pad [cpu.CacheLinePadSize - 4]byte +} + +//go:noescape +func spinLock(state *uint32) + +//go:noescape +func spinUnlock(state *uint32) + +//go:nosplit +func lockAndCheck(addr *uint64) { + // ensure 8-byte alignment + if uintptr(unsafe.Pointer(addr))&7 != 0 { + panicUnaligned() + } + // force dereference before taking lock + _ = *addr + + spinLock(&lock.state) +} + +//go:nosplit +func unlock() { + spinUnlock(&lock.state) +} + +//go:nosplit +func Xadd64(addr *uint64, delta int64) (new uint64) { + lockAndCheck(addr) + + new = *addr + uint64(delta) + *addr = new + + unlock() + return +} + +//go:nosplit +func Xchg64(addr *uint64, new uint64) (old uint64) { + lockAndCheck(addr) + + old = *addr + *addr = new + + unlock() + return +} + +//go:nosplit +func Cas64(addr *uint64, old, new uint64) (swapped bool) { + lockAndCheck(addr) + + if (*addr) == old { + *addr = new + unlock() + return true + } + + unlock() + return false +} + +//go:nosplit +func Load64(addr *uint64) (val uint64) { + lockAndCheck(addr) + + val = *addr + + unlock() + return +} + +//go:nosplit +func Store64(addr *uint64, val uint64) { + lockAndCheck(addr) + + *addr = val + + unlock() + return +} + +//go:nosplit +func Or64(addr *uint64, val uint64) (old uint64) { + for { + old = *addr + if Cas64(addr, old, old|val) { + return old + } + } +} + +//go:nosplit +func And64(addr *uint64, val uint64) (old uint64) { + for { + old = *addr + if Cas64(addr, old, old&val) { + return old + } + } +} + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape 
+func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +//go:noescape +func CasRel(addr *uint32, old, new uint32) bool diff --git a/testing/internal/runtime/atomic/atomic_mipsx.s b/testing/internal/runtime/atomic/atomic_mipsx.s new file mode 100644 index 0000000..4ccc0a3 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_mipsx.s @@ -0,0 +1,298 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +#include "textflag.h" + +// bool Cas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0-13 + MOVW ptr+0(FP), R1 + MOVW old+4(FP), R2 + MOVW new+8(FP), R5 + SYNC +try_cas: + MOVW R5, R3 + LL (R1), R4 // R4 = *R1 + BNE R2, R4, cas_fail + SC R3, (R1) // *R1 = R3 + BEQ R3, try_cas + SYNC + MOVB R3, ret+12(FP) + RET +cas_fail: + SYNC + MOVB R0, ret+12(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + SYNC + MOVW R2, 0(R1) + SYNC + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVB val+4(FP), R2 + SYNC + MOVB R2, 0(R1) + SYNC + RET + +TEXT ·Load(SB),NOSPLIT,$0-8 + MOVW ptr+0(FP), R1 + SYNC + MOVW 0(R1), R1 + SYNC + MOVW R1, ret+4(FP) + RET + +TEXT ·Load8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + SYNC + MOVB 0(R1), R1 + SYNC + MOVB R1, ret+4(FP) + RET + +// uint32 Xadd(uint32 volatile *val, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB),NOSPLIT,$0-12 + MOVW ptr+0(FP), R2 + MOVW delta+4(FP), R3 + SYNC +try_xadd: + LL (R2), R1 // R1 = *R2 + ADDU R1, R3, R4 + MOVW R4, R1 + SC R4, (R2) // *R2 = R4 + BEQ R4, try_xadd + SYNC + MOVW R1, ret+8(FP) + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB),NOSPLIT,$0-12 + MOVW ptr+0(FP), R2 + MOVW new+4(FP), R5 + SYNC +try_xchg: + MOVW R5, R3 + LL (R2), R1 // R1 = *R2 + SC R3, (R2) // *R2 = R3 + BEQ R3, try_xchg + SYNC + MOVW R1, ret+8(FP) + RET + +TEXT ·Casint32(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$0-21 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·CasRel(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loadp(SB),NOSPLIT,$-0-8 + JMP ·Load(SB) + +TEXT ·Storeint32(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-12 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·Xadduintptr(SB),NOSPLIT,$0-12 + JMP ·Xadd(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-8 + JMP ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$0-12 + JMP ·Load64(SB) + +TEXT ·Xaddint32(SB),NOSPLIT,$0-12 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$0-20 + JMP ·Xadd64(SB) + +TEXT ·Casp1(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + +TEXT 
·Xchgint32(SB),NOSPLIT,$0-12 + JMP ·Xchg(SB) + +TEXT ·Xchgint64(SB),NOSPLIT,$0-20 + JMP ·Xchg64(SB) + +TEXT ·Xchguintptr(SB),NOSPLIT,$0-12 + JMP ·Xchg(SB) + +TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·StoreRel(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + +// void Or8(byte volatile*, byte); +TEXT ·Or8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVBU val+4(FP), R2 + MOVW $~3, R3 // Align ptr down to 4 bytes so we can use 32-bit load/store. + AND R1, R3 +#ifdef GOARCH_mips + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + AND $3, R1, R4 // R4 = ((ptr & 3) * 8) + SLL $3, R4 + SLL R4, R2, R2 // Shift val for aligned ptr. R2 = val << R4 + SYNC +try_or8: + LL (R3), R4 // R4 = *R3 + OR R2, R4 + SC R4, (R3) // *R3 = R4 + BEQ R4, try_or8 + SYNC + RET + +// void And8(byte volatile*, byte); +TEXT ·And8(SB),NOSPLIT,$0-5 + MOVW ptr+0(FP), R1 + MOVBU val+4(FP), R2 + MOVW $~3, R3 + AND R1, R3 +#ifdef GOARCH_mips + // Big endian. ptr = ptr ^ 3 + XOR $3, R1 +#endif + AND $3, R1, R4 // R4 = ((ptr & 3) * 8) + SLL $3, R4 + MOVW $0xFF, R5 + SLL R4, R2 + SLL R4, R5 + NOR R0, R5 + OR R5, R2 // Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4) + SYNC +try_and8: + LL (R3), R4 // R4 = *R3 + AND R2, R4 + SC R4, (R3) // *R3 = R4 + BEQ R4, try_and8 + SYNC + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-8 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3 + SC R3, (R1) + BEQ R3, -4(PC) + SYNC + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-12 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + OR R2, R3, R4 + SC R4, (R1) + BEQ R4, -4(PC) + SYNC + MOVW R3, ret+8(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-12 + MOVW ptr+0(FP), R1 + MOVW val+4(FP), R2 + + SYNC + LL (R1), R3 + AND R2, R3, R4 + SC R4, (R1) + BEQ R4, -4(PC) + SYNC + MOVW R3, ret+8(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-12 + JMP ·And32(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-12 + JMP ·Or32(SB) + +TEXT ·spinLock(SB),NOSPLIT,$0-4 + MOVW state+0(FP), R1 + MOVW $1, R2 + SYNC +try_lock: + MOVW R2, R3 +check_again: + LL (R1), R4 + BNE R4, check_again + SC R3, (R1) + BEQ R3, try_lock + SYNC + RET + +TEXT ·spinUnlock(SB),NOSPLIT,$0-4 + MOVW state+0(FP), R1 + SYNC + MOVW R0, (R1) + SYNC + RET diff --git a/testing/internal/runtime/atomic/atomic_ppc64x.go b/testing/internal/runtime/atomic/atomic_ppc64x.go new file mode 100644 index 0000000..590ba03 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_ppc64x.go @@ -0,0 +1,110 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build ppc64 || ppc64le + +package atomic + +import "unsafe" + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg8(ptr *uint8, new uint8) uint8 + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/testing/internal/runtime/atomic/atomic_ppc64x.s b/testing/internal/runtime/atomic/atomic_ppc64x.s new file mode 100644 index 0000000..184a30c --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_ppc64x.s @@ -0,0 +1,440 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +#include "textflag.h" + +// For more details about how various memory models are +// enforced on POWER, the following paper provides more +// details about how they enforce C/C++ like models. This +// gives context about why the strange looking code +// sequences below work. 
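+// (As a concrete instance: the Load* routines below follow each load with
+// a compare of the loaded register against itself, a conditional branch
+// that depends on that compare, and an ISYNC; the dependent branch plus
+// ISYNC forms an acquire barrier that keeps later memory accesses from
+// being reordered before the load.)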
+// +// http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html + +// uint32 ·Load(uint32 volatile* ptr) +TEXT ·Load(SB),NOSPLIT|NOFRAME,$-8-12 + MOVD ptr+0(FP), R3 + SYNC + MOVWZ 0(R3), R3 + CMPW R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVW R3, ret+8(FP) + RET + +// uint8 ·Load8(uint8 volatile* ptr) +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$-8-9 + MOVD ptr+0(FP), R3 + SYNC + MOVBZ 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVB R3, ret+8(FP) + RET + +// uint64 ·Load64(uint64 volatile* ptr) +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + SYNC + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// void *·Loadp(void *volatile *ptr) +TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + SYNC + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7,0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// uint32 ·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$-8-12 + MOVD ptr+0(FP), R3 + MOVWZ 0(R3), R3 + CMPW R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7, 0x4 + ISYNC + MOVW R3, ret+8(FP) + RET + +// uint64 ·LoadAcq64(uint64 volatile* ptr) +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$-8-16 + MOVD ptr+0(FP), R3 + MOVD 0(R3), R3 + CMP R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7, 0x4 + ISYNC + MOVD R3, ret+8(FP) + RET + +// bool cas(uint32 *ptr, uint32 old, uint32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else +// return 0; +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + LWSYNC +cas_again: + LWAR (R3), R6 + CMPW R6, R4 + BNE cas_fail + STWCCC R5, (R3) + BNE cas_again + MOVD $1, R3 + LWSYNC + MOVB R3, ret+16(FP) + RET +cas_fail: + LWSYNC + MOVB R0, ret+16(FP) + RET + +// bool ·Cas64(uint64 *ptr, uint64 old, uint64 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVD ptr+0(FP), R3 + MOVD old+8(FP), R4 + MOVD new+16(FP), R5 + LWSYNC +cas64_again: + LDAR (R3), R6 + CMP R6, R4 + BNE cas64_fail + STDCCC R5, (R3) + BNE cas64_again + MOVD $1, R3 + LWSYNC + MOVB R3, ret+24(FP) + RET +cas64_fail: + LWSYNC + MOVB R0, ret+24(FP) + RET + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + LWSYNC +cas_again: + LWAR (R3), $0, R6 // 0 = Mutex release hint + CMPW R6, R4 + BNE cas_fail + STWCCC R5, (R3) + BNE cas_again + MOVD $1, R3 + MOVB R3, ret+16(FP) + RET +cas_fail: + MOVB R0, ret+16(FP) + RET + +TEXT ·Casint32(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +TEXT ·Casint64(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 + BR ·Load64(SB) + +TEXT ·LoadAcquintptr(SB), NOSPLIT|NOFRAME, $0-16 + BR ·LoadAcq64(SB) + +TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16 + BR ·Load64(SB) + +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + BR ·Store(SB) + +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + BR ·StoreRel64(SB) + +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + BR ·Load(SB) + +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + BR ·Xadd(SB) + +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// bool casp(void **val, void *old, void *new) +// Atomically: +// if(*val == old){ +// 
*val = new; +// return 1; +// } else +// return 0; +TEXT ·Casp1(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// uint32 xadd(uint32 volatile *ptr, int32 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW delta+8(FP), R5 + LWSYNC + LWAR (R4), R3 + ADD R5, R3 + STWCCC R3, (R4) + BNE -3(PC) + MOVW R3, ret+16(FP) + RET + +// uint64 Xadd64(uint64 volatile *val, int64 delta) +// Atomically: +// *val += delta; +// return *val; +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD delta+8(FP), R5 + LWSYNC + LDAR (R4), R3 + ADD R5, R3 + STDCCC R3, (R4) + BNE -3(PC) + MOVD R3, ret+16(FP) + RET + +// uint8 Xchg(ptr *uint8, new uint8) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg8(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R4 + MOVB new+8(FP), R5 + LWSYNC + LBAR (R4), R3 + STBCCC R5, (R4) + BNE -2(PC) + ISYNC + MOVB R3, ret+16(FP) + RET + +// uint32 Xchg(ptr *uint32, new uint32) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW new+8(FP), R5 + LWSYNC + LWAR (R4), R3 + STWCCC R5, (R4) + BNE -2(PC) + ISYNC + MOVW R3, ret+16(FP) + RET + +// uint64 Xchg64(ptr *uint64, new uint64) +// Atomically: +// old := *ptr; +// *ptr = new; +// return old; +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD new+8(FP), R5 + LWSYNC + LDAR (R4), R3 + STDCCC R5, (R4) + BNE -2(PC) + ISYNC + MOVD R3, ret+16(FP) + RET + +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + BR ·Xchg(SB) + +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +TEXT ·Store(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + SYNC + MOVW R4, 0(R3) + RET + +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVB val+8(FP), R4 + SYNC + MOVB R4, 0(R3) + RET + +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + SYNC + MOVD R4, 0(R3) + RET + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC + MOVW R4, 0(R3) + RET + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC + MOVD R4, 0(R3) + RET + +// void ·Or8(byte volatile*, byte); +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + LWSYNC +again: + LBAR (R3), R6 + OR R4, R6 + STBCCC R6, (R3) + BNE again + RET + +// void ·And8(byte volatile*, byte); +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + LWSYNC +again: + LBAR (R3), R6 + AND R4, R6 + STBCCC R6, (R3) + BNE again + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3), R6 + OR R4, R6 + STWCCC R6, (R3) + BNE again + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3),R6 + AND R4, R6 + STWCCC R6, (R3) + BNE again + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3), R6 + OR R4, R6, R7 + STWCCC R7, (R3) + BNE again + MOVW R6, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC +again: + LWAR (R3),R6 + AND R4, R6, R7 + STWCCC R7, (R3) + BNE again + MOVW R6, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT 
·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC +again: + LDAR (R3), R6 + OR R4, R6, R7 + STDCCC R7, (R3) + BNE again + MOVD R6, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R3 + MOVD val+8(FP), R4 + LWSYNC +again: + LDAR (R3),R6 + AND R4, R6, R7 + STDCCC R7, (R3) + BNE again + MOVD R6, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git a/testing/internal/runtime/atomic/atomic_riscv64.go b/testing/internal/runtime/atomic/atomic_riscv64.go new file mode 100644 index 0000000..9fc3837 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_riscv64.go @@ -0,0 +1,103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Load(ptr *uint32) uint32 + +//go:noescape +func Load8(ptr *uint8) uint8 + +//go:noescape +func Load64(ptr *uint64) uint64 + +// NO go:noescape annotation; *ptr escapes if result escapes (#31525) +func Loadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func LoadAcq(ptr *uint32) uint32 + +//go:noescape +func LoadAcq64(ptr *uint64) uint64 + +//go:noescape +func LoadAcquintptr(ptr *uintptr) uintptr + +//go:noescape +func Or8(ptr *uint8, val uint8) + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func StoreRel64(ptr *uint64, val uint64) + +//go:noescape +func StoreReluintptr(ptr *uintptr, val uintptr) diff --git a/testing/internal/runtime/atomic/atomic_riscv64.s b/testing/internal/runtime/atomic/atomic_riscv64.s new file mode 100644 index 0000000..bf6bd35 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_riscv64.s @@ -0,0 +1,324 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"), +// which may be toggled on and off. Their precise semantics are defined in +// section 6.3 of the specification, but the basic idea is as follows: +// +// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily. +// It guarantees only that it will execute atomically. +// +// - If aq is set, the CPU may move the instruction backward, but not forward. +// +// - If rl is set, the CPU may move the instruction forward, but not backward. +// +// - If both are set, the CPU may not reorder the instruction at all. +// +// These four modes correspond to other well-known memory models on other CPUs. +// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On +// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence +// (or a lock prefix). +// +// Go's memory model requires that +// - if a read happens after a write, the read must observe the write, and +// that +// - if a read happens concurrently with a write, the read may observe the +// write. +// aq is sufficient to guarantee this, so that's what we use here. (This jibes +// with ARM, which uses dmb ishst.) + +#include "textflag.h" + +// func Cas(ptr *uint64, old, new uint64) bool +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// } else { +// return 0; +// } +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOV ptr+0(FP), A0 + MOVW old+8(FP), A1 + MOVW new+12(FP), A2 +cas_again: + LRW (A0), A3 + BNE A3, A1, cas_fail + SCW A2, (A0), A4 + BNE A4, ZERO, cas_again + MOV $1, A0 + MOVB A0, ret+16(FP) + RET +cas_fail: + MOV $0, A0 + MOV A0, ret+16(FP) + RET + +// func Cas64(ptr *uint64, old, new uint64) bool +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOV ptr+0(FP), A0 + MOV old+8(FP), A1 + MOV new+16(FP), A2 +cas_again: + LRD (A0), A3 + BNE A3, A1, cas_fail + SCD A2, (A0), A4 + BNE A4, ZERO, cas_again + MOV $1, A0 + MOVB A0, ret+24(FP) + RET +cas_fail: + MOVB ZERO, ret+24(FP) + RET + +// func Load(ptr *uint32) uint32 +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 + MOV ptr+0(FP), A0 + LRW (A0), A0 + MOVW A0, ret+8(FP) + RET + +// func Load8(ptr *uint8) uint8 +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9 + MOV ptr+0(FP), A0 + FENCE + MOVBU (A0), A1 + FENCE + MOVB A1, ret+8(FP) + RET + +// func Load64(ptr *uint64) uint64 +TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16 + MOV ptr+0(FP), A0 + LRD (A0), A0 + MOV A0, ret+8(FP) + RET + +// func Store(ptr *uint32, val uint32) +TEXT ·Store(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOSWAPW A1, (A0), ZERO + RET + +// func Store8(ptr *uint8, val uint8) +TEXT ·Store8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + FENCE + MOVB A1, (A0) + FENCE + RET + +// func Store64(ptr *uint64, val uint64) +TEXT ·Store64(SB), NOSPLIT, $0-16 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOSWAPD A1, (A0), ZERO + RET + +TEXT ·Casp1(SB), NOSPLIT, $0-25 + JMP ·Cas64(SB) + +TEXT ·Casint32(SB),NOSPLIT,$0-17 + JMP ·Cas(SB) + +TEXT ·Casint64(SB),NOSPLIT,$0-25 + JMP ·Cas64(SB) + +TEXT ·Casuintptr(SB),NOSPLIT,$0-25 + JMP ·Cas64(SB) + +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + +TEXT ·Loaduintptr(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +TEXT ·Storeint32(SB),NOSPLIT,$0-12 + JMP ·Store(SB) + +TEXT ·Storeint64(SB),NOSPLIT,$0-16 + JMP ·Store64(SB) + +TEXT ·Storeuintptr(SB),NOSPLIT,$0-16 + JMP ·Store64(SB) + +TEXT ·Loaduint(SB),NOSPLIT,$0-16 + JMP ·Loaduintptr(SB) + +TEXT ·Loadint32(SB),NOSPLIT,$0-12 + JMP ·Load(SB) + +TEXT ·Loadint64(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +TEXT 
·Xaddint32(SB),NOSPLIT,$0-20 + JMP ·Xadd(SB) + +TEXT ·Xaddint64(SB),NOSPLIT,$0-24 + MOV ptr+0(FP), A0 + MOV delta+8(FP), A1 + AMOADDD A1, (A0), A0 + ADD A0, A1, A0 + MOVW A0, ret+16(FP) + RET + +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP ·Load(SB) + +TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16 + JMP ·Load64(SB) + +// func Loadp(ptr unsafe.Pointer) unsafe.Pointer +TEXT ·Loadp(SB),NOSPLIT,$0-16 + JMP ·Load64(SB) + +// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) +TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + +TEXT ·StoreRel64(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16 + JMP ·Store64(SB) + +// func Xchg(ptr *uint32, new uint32) uint32 +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW new+8(FP), A1 + AMOSWAPW A1, (A0), A1 + MOVW A1, ret+16(FP) + RET + +// func Xchg64(ptr *uint64, new uint64) uint64 +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV new+8(FP), A1 + AMOSWAPD A1, (A0), A1 + MOV A1, ret+16(FP) + RET + +// Atomically: +// *val += delta; +// return *val; + +// func Xadd(ptr *uint32, delta int32) uint32 +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW delta+8(FP), A1 + AMOADDW A1, (A0), A2 + ADD A2,A1,A0 + MOVW A0, ret+16(FP) + RET + +// func Xadd64(ptr *uint64, delta int64) uint64 +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV delta+8(FP), A1 + AMOADDD A1, (A0), A2 + ADD A2, A1, A0 + MOV A0, ret+16(FP) + RET + +// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + JMP ·Xadd64(SB) + +// func Xchgint32(ptr *int32, new int32) int32 +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + JMP ·Xchg(SB) + +// func Xchgint64(ptr *int64, new int64) int64 +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +// func Xchguintptr(ptr *uintptr, new uintptr) uintptr +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + JMP ·Xchg64(SB) + +// func And8(ptr *uint8, val uint8) +TEXT ·And8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + AND $3, A0, A2 + AND $-4, A0 + SLL $3, A2 + XOR $255, A1 + SLL A2, A1 + XOR $-1, A1 + AMOANDW A1, (A0), ZERO + RET + +// func Or8(ptr *uint8, val uint8) +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOV ptr+0(FP), A0 + MOVBU val+8(FP), A1 + AND $3, A0, A2 + AND $-4, A0 + SLL $3, A2 + SLL A2, A1 + AMOORW A1, (A0), ZERO + RET + +// func And(ptr *uint32, val uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOANDW A1, (A0), ZERO + RET + +// func Or(ptr *uint32, val uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOORW A1, (A0), ZERO + RET + +// func Or32(ptr *uint32, val uint32) uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOORW A1, (A0), A2 + MOVW A2, ret+16(FP) + RET + +// func And32(ptr *uint32, val uint32) uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOV ptr+0(FP), A0 + MOVW val+8(FP), A1 + AMOANDW A1, (A0), A2 + MOVW A2, ret+16(FP) + RET + +// func Or64(ptr *uint64, val uint64) uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOORD A1, (A0), A2 + MOV A2, ret+16(FP) + RET + +// func And64(ptr *uint64, val uint64) uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOV ptr+0(FP), A0 + MOV val+8(FP), A1 + AMOANDD A1, (A0), A2 + MOV A2, ret+16(FP) + RET + +// func Anduintptr(ptr *uintptr, val uintptr) uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(ptr *uintptr, val 
uintptr) uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) diff --git a/testing/internal/runtime/atomic/atomic_s390x.go b/testing/internal/runtime/atomic/atomic_s390x.go new file mode 100644 index 0000000..68b4e16 --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_s390x.go @@ -0,0 +1,141 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Export some functions via linkname to assembly in sync/atomic. +// +//go:linkname Load +//go:linkname Loadp +//go:linkname Load64 + +//go:nosplit +//go:noinline +func Load(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func Loadp(ptr unsafe.Pointer) unsafe.Pointer { + return *(*unsafe.Pointer)(ptr) +} + +//go:nosplit +//go:noinline +func Load8(ptr *uint8) uint8 { + return *ptr +} + +//go:nosplit +//go:noinline +func Load64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcq64(ptr *uint64) uint64 { + return *ptr +} + +//go:nosplit +//go:noinline +func LoadAcquintptr(ptr *uintptr) uintptr { + return *ptr +} + +//go:noescape +func Store(ptr *uint32, val uint32) + +//go:noescape +func Store8(ptr *uint8, val uint8) + +//go:noescape +func Store64(ptr *uint64, val uint64) + +// NO go:noescape annotation; see atomic_pointer.go. +func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:nosplit +//go:noinline +func StoreRel(ptr *uint32, val uint32) { + *ptr = val +} + +//go:nosplit +//go:noinline +func StoreRel64(ptr *uint64, val uint64) { + *ptr = val +} + +//go:nosplit +//go:noinline +func StoreReluintptr(ptr *uintptr, val uintptr) { + *ptr = val +} + +//go:noescape +func And8(ptr *uint8, val uint8) + +//go:noescape +func Or8(ptr *uint8, val uint8) + +// NOTE: Do not add atomicxor8 (XOR is not idempotent). + +//go:noescape +func And(ptr *uint32, val uint32) + +//go:noescape +func Or(ptr *uint32, val uint32) + +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func Xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func Xadduintptr(ptr *uintptr, delta uintptr) uintptr + +//go:noescape +func Xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func Xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func Xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool diff --git a/testing/internal/runtime/atomic/atomic_s390x.s b/testing/internal/runtime/atomic/atomic_s390x.s new file mode 100644 index 0000000..6e4ea0e --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_s390x.s @@ -0,0 +1,304 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// func Store(ptr *uint32, val uint32) +TEXT ·Store(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVWZ val+8(FP), R3 + MOVW R3, 0(R2) + SYNC + RET + +// func Store8(ptr *uint8, val uint8) +TEXT ·Store8(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVB val+8(FP), R3 + MOVB R3, 0(R2) + SYNC + RET + +// func Store64(ptr *uint64, val uint64) +TEXT ·Store64(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVD val+8(FP), R3 + MOVD R3, 0(R2) + SYNC + RET + +// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) +TEXT ·StorepNoWB(SB), NOSPLIT, $0 + MOVD ptr+0(FP), R2 + MOVD val+8(FP), R3 + MOVD R3, 0(R2) + SYNC + RET + +// func Cas(ptr *uint32, old, new uint32) bool +// Atomically: +// if *ptr == old { +// *val = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Cas(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + CS R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 + BNE cas_fail + MOVB $1, ret+16(FP) + RET +cas_fail: + MOVB $0, ret+16(FP) + RET + +// func Cas64(ptr *uint64, old, new uint64) bool +// Atomically: +// if *ptr == old { +// *ptr = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Cas64(SB), NOSPLIT, $0-25 + MOVD ptr+0(FP), R3 + MOVD old+8(FP), R4 + MOVD new+16(FP), R5 + CSG R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5 + BNE cas64_fail + MOVB $1, ret+24(FP) + RET +cas64_fail: + MOVB $0, ret+24(FP) + RET + +// func Casint32(ptr *int32, old, new int32) bool +TEXT ·Casint32(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +// func Casint64(ptr *int64, old, new int64) bool +TEXT ·Casint64(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func Casuintptr(ptr *uintptr, old, new uintptr) bool +TEXT ·Casuintptr(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func CasRel(ptr *uint32, old, new uint32) bool +TEXT ·CasRel(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + +// func Loaduintptr(ptr *uintptr) uintptr +TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +// func Loaduint(ptr *uint) uint +TEXT ·Loaduint(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +// func Storeint32(ptr *int32, new int32) +TEXT ·Storeint32(SB), NOSPLIT, $0-12 + BR ·Store(SB) + +// func Storeint64(ptr *int64, new int64) +TEXT ·Storeint64(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +// func Storeuintptr(ptr *uintptr, new uintptr) +TEXT ·Storeuintptr(SB), NOSPLIT, $0-16 + BR ·Store64(SB) + +// func Loadint32(ptr *int32) int32 +TEXT ·Loadint32(SB), NOSPLIT, $0-12 + BR ·Load(SB) + +// func Loadint64(ptr *int64) int64 +TEXT ·Loadint64(SB), NOSPLIT, $0-16 + BR ·Load64(SB) + +// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr +TEXT ·Xadduintptr(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// func Xaddint32(ptr *int32, delta int32) int32 +TEXT ·Xaddint32(SB), NOSPLIT, $0-20 + BR ·Xadd(SB) + +// func Xaddint64(ptr *int64, delta int64) int64 +TEXT ·Xaddint64(SB), NOSPLIT, $0-24 + BR ·Xadd64(SB) + +// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool +// Atomically: +// if *ptr == old { +// *ptr = new +// return 1 +// } else { +// return 0 +// } +TEXT ·Casp1(SB), NOSPLIT, $0-25 + BR ·Cas64(SB) + +// func Xadd(ptr *uint32, delta int32) uint32 +// Atomically: +// *ptr += delta +// return *ptr +TEXT ·Xadd(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW delta+8(FP), R5 + MOVW (R4), R3 +repeat: + ADD R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R6, ret+16(FP) + RET + +// func Xadd64(ptr *uint64, delta int64) uint64 +TEXT ·Xadd64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD delta+8(FP), R5 + MOVD (R4), R3 +repeat: + ADD R5, R3, R6 + CSG R3, R6, (R4) // 
if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R6, ret+16(FP) + RET + +// func Xchg(ptr *uint32, new uint32) uint32 +TEXT ·Xchg(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW new+8(FP), R3 + MOVW (R4), R6 +repeat: + CS R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) + BNE repeat + MOVW R6, ret+16(FP) + RET + +// func Xchg64(ptr *uint64, new uint64) uint64 +TEXT ·Xchg64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD new+8(FP), R3 + MOVD (R4), R6 +repeat: + CSG R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4) + BNE repeat + MOVD R6, ret+16(FP) + RET + +// func Xchgint32(ptr *int32, new int32) int32 +TEXT ·Xchgint32(SB), NOSPLIT, $0-20 + BR ·Xchg(SB) + +// func Xchgint64(ptr *int64, new int64) int64 +TEXT ·Xchgint64(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +// func Xchguintptr(ptr *uintptr, new uintptr) uintptr +TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 + BR ·Xchg64(SB) + +// func Or8(addr *uint8, v uint8) +TEXT ·Or8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + // We don't have atomic operations that work on individual bytes so we + // need to align addr down to a word boundary and create a mask + // containing v to OR with the entire word atomically. + MOVD $(3<<3), R5 + RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) + ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 + SLW R5, R4 // R4 = uint32(v) << R5 + LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) + RET + +// func And8(addr *uint8, v uint8) +TEXT ·And8(SB), NOSPLIT, $0-9 + MOVD ptr+0(FP), R3 + MOVBZ val+8(FP), R4 + // We don't have atomic operations that work on individual bytes so we + // need to align addr down to a word boundary and create a mask + // containing v to AND with the entire word atomically. + ORW $~0xff, R4 // R4 = uint32(v) | 0xffffff00 + MOVD $(3<<3), R5 + RXSBG $59, $60, $3, R3, R5 // R5 = 24 - ((addr % 4) * 8) = ((addr & 3) << 3) ^ (3 << 3) + ANDW $~3, R3 // R3 = floor(addr, 4) = addr &^ 3 + RLL R5, R4, R4 // R4 = rotl(R4, R5) + LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) + RET + +// func Or(addr *uint32, v uint32) +TEXT ·Or(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LAO R4, R6, 0(R3) // R6 = *R3; *R3 |= R4; (atomic) + RET + +// func And(addr *uint32, v uint32) +TEXT ·And(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) + RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + OR R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + AND R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + OR R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + AND R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT 
·Anduintptr(SB), NOSPLIT, $0-24 + BR ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + BR ·Or64(SB) diff --git a/testing/internal/runtime/atomic/atomic_test.go b/testing/internal/runtime/atomic/atomic_test.go new file mode 100644 index 0000000..04351cc --- /dev/null +++ b/testing/internal/runtime/atomic/atomic_test.go @@ -0,0 +1,387 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic_test + +import ( + "runtime" + "testing" + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goarch" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/runtime/atomic" +) + +func runParallel(N, iter int, f func()) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(int(N))) + done := make(chan bool) + for i := 0; i < N; i++ { + go func() { + for j := 0; j < iter; j++ { + f() + } + done <- true + }() + } + for i := 0; i < N; i++ { + <-done + } +} + +func TestXadduintptr(t *testing.T) { + N := 20 + iter := 100000 + if testing.Short() { + N = 10 + iter = 10000 + } + inc := uintptr(100) + total := uintptr(0) + runParallel(N, iter, func() { + atomic.Xadduintptr(&total, inc) + }) + if want := uintptr(N*iter) * inc; want != total { + t.Fatalf("xadduintpr error, want %d, got %d", want, total) + } + total = 0 + runParallel(N, iter, func() { + atomic.Xadduintptr(&total, inc) + atomic.Xadduintptr(&total, uintptr(-int64(inc))) + }) + if total != 0 { + t.Fatalf("xadduintpr total error, want %d, got %d", 0, total) + } +} + +// Tests that xadduintptr correctly updates 64-bit values. The place where +// we actually do so is mstats.go, functions mSysStat{Inc,Dec}. +func TestXadduintptrOnUint64(t *testing.T) { + if goarch.BigEndian { + // On big endian architectures, we never use xadduintptr to update + // 64-bit values and hence we skip the test. (Note that functions + // mSysStat{Inc,Dec} in mstats.go have explicit checks for + // big-endianness.) + t.Skip("skip xadduintptr on big endian architecture") + } + const inc = 100 + val := uint64(0) + atomic.Xadduintptr((*uintptr)(unsafe.Pointer(&val)), inc) + if inc != val { + t.Fatalf("xadduintptr should increase lower-order bits, want %d, got %d", inc, val) + } +} + +func shouldPanic(t *testing.T, name string, f func()) { + defer func() { + // Check that all GC maps are sane. + runtime.GC() + + err := recover() + want := "unaligned 64-bit atomic operation" + if err == nil { + t.Errorf("%s did not panic", name) + } else if s, _ := err.(string); s != want { + t.Errorf("%s: wanted panic %q, got %q", name, want, err) + } + }() + f() +} + +// Variant of sync/atomic's TestUnaligned64: +func TestUnaligned64(t *testing.T) { + // Unaligned 64-bit atomics on 32-bit systems are + // a continual source of pain. Test that on 32-bit systems they crash + // instead of failing silently. 
+ + if unsafe.Sizeof(int(0)) != 4 { + t.Skip("test only runs on 32-bit systems") + } + + x := make([]uint32, 4) + u := unsafe.Pointer(uintptr(unsafe.Pointer(&x[0])) | 4) // force alignment to 4 + + up64 := (*uint64)(u) // misaligned + p64 := (*int64)(u) // misaligned + + shouldPanic(t, "Load64", func() { atomic.Load64(up64) }) + shouldPanic(t, "Loadint64", func() { atomic.Loadint64(p64) }) + shouldPanic(t, "Store64", func() { atomic.Store64(up64, 0) }) + shouldPanic(t, "Xadd64", func() { atomic.Xadd64(up64, 1) }) + shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) }) + shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) }) +} + +func TestAnd8(t *testing.T) { + // Basic sanity check. + x := uint8(0xff) + for i := uint8(0); i < 8; i++ { + atomic.And8(&x, ^(1 << i)) + if r := uint8(0xff) << (i + 1); x != r { + t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<(SB),NOSPLIT,$0 + MOVW $0xffff0fc0, R15 // R15 is hardware PC. + +TEXT ·Cas(SB),NOSPLIT|NOFRAME,$0 + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + JMP ·armcas(SB) + JMP kernelcas<>(SB) + +TEXT kernelcas<>(SB),NOSPLIT,$0 + MOVW ptr+0(FP), R2 + // trigger potential paging fault here, + // because we don't know how to traceback through __kuser_cmpxchg + MOVW (R2), R0 + MOVW old+4(FP), R0 + MOVW new+8(FP), R1 + BL cas<>(SB) + BCC ret0 + MOVW $1, R0 + MOVB R0, ret+12(FP) + RET +ret0: + MOVW $0, R0 + MOVB R0, ret+12(FP) + RET + +// As for cas, memory barriers are complicated on ARM, but the kernel +// provides a user helper. ARMv5 does not support SMP and has no +// memory barrier instruction at all. ARMv6 added SMP support and has +// a memory barrier, but it requires writing to a coprocessor +// register. ARMv7 introduced the DMB instruction, but it's expensive +// even on single-core devices. The kernel helper takes care of all of +// this for us. + +// Use kernel helper version of memory_barrier, when compiled with GOARM < 7. +TEXT memory_barrier<>(SB),NOSPLIT|NOFRAME,$0 + MOVW $0xffff0fa0, R15 // R15 is hardware PC. + +TEXT ·Load(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R0 + MOVW (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BGE native_barrier + BL memory_barrier<>(SB) + B end +native_barrier: + DMB MB_ISH +end: + MOVW R1, ret+4(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R1 + MOVW v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BGE native_barrier + BL memory_barrier<>(SB) + B store +native_barrier: + DMB MB_ISH + +store: + MOVW R2, (R1) + + CMP $7, R8 + BGE native_barrier2 + BL memory_barrier<>(SB) + RET +native_barrier2: + DMB MB_ISH + RET + +TEXT ·Load8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R0 + MOVB (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BGE native_barrier + BL memory_barrier<>(SB) + B end +native_barrier: + DMB MB_ISH +end: + MOVB R1, ret+4(FP) + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R1 + MOVB v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BGE native_barrier + BL memory_barrier<>(SB) + B store +native_barrier: + DMB MB_ISH + +store: + MOVB R2, (R1) + + CMP $7, R8 + BGE native_barrier2 + BL memory_barrier<>(SB) + RET +native_barrier2: + DMB MB_ISH + RET diff --git a/testing/internal/runtime/atomic/sys_nonlinux_arm.s b/testing/internal/runtime/atomic/sys_nonlinux_arm.s new file mode 100644 index 0000000..4ec3ec5 --- /dev/null +++ b/testing/internal/runtime/atomic/sys_nonlinux_arm.s @@ -0,0 +1,78 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux + +#include "textflag.h" + +// TODO(minux): this is only valid for ARMv6+ +// bool armcas(int32 *val, int32 old, int32 new) +// Atomically: +// if(*val == old){ +// *val = new; +// return 1; +// }else +// return 0; +TEXT ·Cas(SB),NOSPLIT,$0 + JMP ·armcas(SB) + +// Non-linux OSes support only single processor machines before ARMv7. +// So we don't need memory barriers if goarm < 7. And we fail loud at +// startup (runtime.checkgoarm) if it is a multi-processor but goarm < 7. + +TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-8 + MOVW addr+0(FP), R0 + MOVW (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + DMB MB_ISH + + MOVW R1, ret+4(FP) + RET + +TEXT ·Store(SB),NOSPLIT,$0-8 + MOVW addr+0(FP), R1 + MOVW v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + + MOVW R2, (R1) + + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + RET + +TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-5 + MOVW addr+0(FP), R0 + MOVB (R0), R1 + + MOVB runtime·goarm(SB), R11 + CMP $7, R11 + BLT 2(PC) + DMB MB_ISH + + MOVB R1, ret+4(FP) + RET + +TEXT ·Store8(SB),NOSPLIT,$0-5 + MOVW addr+0(FP), R1 + MOVB v+4(FP), R2 + + MOVB runtime·goarm(SB), R8 + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + + MOVB R2, (R1) + + CMP $7, R8 + BLT 2(PC) + DMB MB_ISH + RET diff --git a/testing/internal/runtime/atomic/types.go b/testing/internal/runtime/atomic/types.go new file mode 100644 index 0000000..287742f --- /dev/null +++ b/testing/internal/runtime/atomic/types.go @@ -0,0 +1,587 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +import "unsafe" + +// Int32 is an atomically accessed int32 value. +// +// An Int32 must not be copied. +type Int32 struct { + noCopy noCopy + value int32 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (i *Int32) Load() int32 { + return Loadint32(&i.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (i *Int32) Store(value int32) { + Storeint32(&i.value, value) +} + +// CompareAndSwap atomically compares i's value with old, +// and if they're equal, swaps i's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (i *Int32) CompareAndSwap(old, new int32) bool { + return Casint32(&i.value, old, new) +} + +// Swap replaces i's value with new, returning +// i's value before the replacement. +// +//go:nosplit +func (i *Int32) Swap(new int32) int32 { + return Xchgint32(&i.value, new) +} + +// Add adds delta to i atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (i *Int32) Add(delta int32) int32 { + return Xaddint32(&i.value, delta) +} + +// Int64 is an atomically accessed int64 value. +// +// 8-byte aligned on all platforms, unlike a regular int64. +// +// An Int64 must not be copied. +type Int64 struct { + noCopy noCopy + _ align64 + value int64 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (i *Int64) Load() int64 { + return Loadint64(&i.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (i *Int64) Store(value int64) { + Storeint64(&i.value, value) +} + +// CompareAndSwap atomically compares i's value with old, +// and if they're equal, swaps i's value with new. +// It reports whether the swap ran. 
+// +//go:nosplit +func (i *Int64) CompareAndSwap(old, new int64) bool { + return Casint64(&i.value, old, new) +} + +// Swap replaces i's value with new, returning +// i's value before the replacement. +// +//go:nosplit +func (i *Int64) Swap(new int64) int64 { + return Xchgint64(&i.value, new) +} + +// Add adds delta to i atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (i *Int64) Add(delta int64) int64 { + return Xaddint64(&i.value, delta) +} + +// Uint8 is an atomically accessed uint8 value. +// +// A Uint8 must not be copied. +type Uint8 struct { + noCopy noCopy + value uint8 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uint8) Load() uint8 { + return Load8(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uint8) Store(value uint8) { + Store8(&u.value, value) +} + +// And takes value and performs a bit-wise +// "and" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint8) And(value uint8) { + And8(&u.value, value) +} + +// Or takes value and performs a bit-wise +// "or" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint8) Or(value uint8) { + Or8(&u.value, value) +} + +// Bool is an atomically accessed bool value. +// +// A Bool must not be copied. +type Bool struct { + // Inherits noCopy from Uint8. + u Uint8 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (b *Bool) Load() bool { + return b.u.Load() != 0 +} + +// Store updates the value atomically. +// +//go:nosplit +func (b *Bool) Store(value bool) { + s := uint8(0) + if value { + s = 1 + } + b.u.Store(s) +} + +// Uint32 is an atomically accessed uint32 value. +// +// A Uint32 must not be copied. +type Uint32 struct { + noCopy noCopy + value uint32 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uint32) Load() uint32 { + return Load(&u.value) +} + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) LoadAcquire() uint32 { + return LoadAcq(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uint32) Store(value uint32) { + Store(&u.value, value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) StoreRelease(value uint32) { + StoreRel(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uint32) CompareAndSwap(old, new uint32) bool { + return Cas(&u.value, old, new) +} + +// CompareAndSwapRelease is a partially unsynchronized version +// of Cas that relaxes ordering constraints. 
Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// It reports whether the swap ran. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint32) CompareAndSwapRelease(old, new uint32) bool { + return CasRel(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uint32) Swap(value uint32) uint32 { + return Xchg(&u.value, value) +} + +// And takes value and performs a bit-wise +// "and" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint32) And(value uint32) { + And(&u.value, value) +} + +// Or takes value and performs a bit-wise +// "or" operation with the value of u, storing +// the result into u. +// +// The full process is performed atomically. +// +//go:nosplit +func (u *Uint32) Or(value uint32) { + Or(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uint32) Add(delta int32) uint32 { + return Xadd(&u.value, delta) +} + +// Uint64 is an atomically accessed uint64 value. +// +// 8-byte aligned on all platforms, unlike a regular uint64. +// +// A Uint64 must not be copied. +type Uint64 struct { + noCopy noCopy + _ align64 + value uint64 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uint64) Load() uint64 { + return Load64(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uint64) Store(value uint64) { + Store64(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uint64) CompareAndSwap(old, new uint64) bool { + return Cas64(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uint64) Swap(value uint64) uint64 { + return Xchg64(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uint64) Add(delta int64) uint64 { + return Xadd64(&u.value, delta) +} + +// Uintptr is an atomically accessed uintptr value. +// +// A Uintptr must not be copied. +type Uintptr struct { + noCopy noCopy + value uintptr +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *Uintptr) Load() uintptr { + return Loaduintptr(&u.value) +} + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uintptr) LoadAcquire() uintptr { + return LoadAcquintptr(&u.value) +} + +// Store updates the value atomically. +// +//go:nosplit +func (u *Uintptr) Store(value uintptr) { + Storeuintptr(&u.value, value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. 
Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uintptr) StoreRelease(value uintptr) { + StoreReluintptr(&u.value, value) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +// +//go:nosplit +func (u *Uintptr) CompareAndSwap(old, new uintptr) bool { + return Casuintptr(&u.value, old, new) +} + +// Swap replaces u's value with new, returning +// u's value before the replacement. +// +//go:nosplit +func (u *Uintptr) Swap(value uintptr) uintptr { + return Xchguintptr(&u.value, value) +} + +// Add adds delta to u atomically, returning +// the new updated value. +// +// This operation wraps around in the usual +// two's-complement way. +// +//go:nosplit +func (u *Uintptr) Add(delta uintptr) uintptr { + return Xadduintptr(&u.value, delta) +} + +// Float64 is an atomically accessed float64 value. +// +// 8-byte aligned on all platforms, unlike a regular float64. +// +// A Float64 must not be copied. +type Float64 struct { + // Inherits noCopy and align64 from Uint64. + u Uint64 +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (f *Float64) Load() float64 { + r := f.u.Load() + return *(*float64)(unsafe.Pointer(&r)) +} + +// Store updates the value atomically. +// +//go:nosplit +func (f *Float64) Store(value float64) { + f.u.Store(*(*uint64)(unsafe.Pointer(&value))) +} + +// UnsafePointer is an atomically accessed unsafe.Pointer value. +// +// Note that because of the atomicity guarantees, stores to values +// of this type never trigger a write barrier, and the relevant +// methods are suffixed with "NoWB" to indicate that explicitly. +// As a result, this type should be used carefully, and sparingly, +// mostly with values that do not live in the Go heap anyway. +// +// An UnsafePointer must not be copied. +type UnsafePointer struct { + noCopy noCopy + value unsafe.Pointer +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (u *UnsafePointer) Load() unsafe.Pointer { + return Loadp(unsafe.Pointer(&u.value)) +} + +// StoreNoWB updates the value atomically. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer Store instead. +// +//go:nosplit +func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) { + StorepNoWB(unsafe.Pointer(&u.value), value) +} + +// Store updates the value atomically. +func (u *UnsafePointer) Store(value unsafe.Pointer) { + storePointer(&u.value, value) +} + +// provided by runtime +// +//go:linkname storePointer +func storePointer(ptr *unsafe.Pointer, new unsafe.Pointer) + +// CompareAndSwapNoWB atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer CompareAndSwap instead. 
+// +//go:nosplit +func (u *UnsafePointer) CompareAndSwapNoWB(old, new unsafe.Pointer) bool { + return Casp1(&u.value, old, new) +} + +// CompareAndSwap atomically compares u's value with old, +// and if they're equal, swaps u's value with new. +// It reports whether the swap ran. +func (u *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) bool { + return casPointer(&u.value, old, new) +} + +func casPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool + +// Pointer is an atomic pointer of type *T. +type Pointer[T any] struct { + u UnsafePointer +} + +// Load accesses and returns the value atomically. +// +//go:nosplit +func (p *Pointer[T]) Load() *T { + return (*T)(p.u.Load()) +} + +// StoreNoWB updates the value atomically. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer Store instead. +// +//go:nosplit +func (p *Pointer[T]) StoreNoWB(value *T) { + p.u.StoreNoWB(unsafe.Pointer(value)) +} + +// Store updates the value atomically. +// +//go:nosplit +func (p *Pointer[T]) Store(value *T) { + p.u.Store(unsafe.Pointer(value)) +} + +// CompareAndSwapNoWB atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +// +// WARNING: As the name implies this operation does *not* +// perform a write barrier on value, and so this operation may +// hide pointers from the GC. Use with care and sparingly. +// It is safe to use with values not found in the Go heap. +// Prefer CompareAndSwap instead. +// +//go:nosplit +func (p *Pointer[T]) CompareAndSwapNoWB(old, new *T) bool { + return p.u.CompareAndSwapNoWB(unsafe.Pointer(old), unsafe.Pointer(new)) +} + +// CompareAndSwap atomically (with respect to other methods) +// compares u's value with old, and if they're equal, +// swaps u's value with new. +// It reports whether the swap ran. +func (p *Pointer[T]) CompareAndSwap(old, new *T) bool { + return p.u.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + +// align64 may be added to structs that must be 64-bit aligned. +// This struct is recognized by a special case in the compiler +// and will not work if copied to any other package. +type align64 struct{} diff --git a/testing/internal/runtime/atomic/types_64bit.go b/testing/internal/runtime/atomic/types_64bit.go new file mode 100644 index 0000000..006e83b --- /dev/null +++ b/testing/internal/runtime/atomic/types_64bit.go @@ -0,0 +1,33 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 || arm64 || loong64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm + +package atomic + +// LoadAcquire is a partially unsynchronized version +// of Load that relaxes ordering constraints. Other threads +// may observe operations that precede this operation to +// occur after it, but no operation that occurs after it +// on this thread can be observed to occur before it. 
+// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint64) LoadAcquire() uint64 { + return LoadAcq64(&u.value) +} + +// StoreRelease is a partially unsynchronized version +// of Store that relaxes ordering constraints. Other threads +// may observe operations that occur after this operation to +// precede it, but no operation that precedes it +// on this thread can be observed to occur after it. +// +// WARNING: Use sparingly and with great care. +// +//go:nosplit +func (u *Uint64) StoreRelease(value uint64) { + StoreRel64(&u.value, value) +} diff --git a/testing/internal/runtime/atomic/unaligned.go b/testing/internal/runtime/atomic/unaligned.go new file mode 100644 index 0000000..a859de4 --- /dev/null +++ b/testing/internal/runtime/atomic/unaligned.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic + +func panicUnaligned() { + panic("unaligned 64-bit atomic operation") +} diff --git a/testing/internal/runtime/atomic/xchg8_test.go b/testing/internal/runtime/atomic/xchg8_test.go new file mode 100644 index 0000000..83467f6 --- /dev/null +++ b/testing/internal/runtime/atomic/xchg8_test.go @@ -0,0 +1,60 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || arm || arm64 || loong64 || ppc64 || ppc64le + +package atomic_test + +import ( + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/runtime/atomic" +) + +func TestXchg8(t *testing.T) { + var a [16]uint8 + for i := range a { + next := uint8(i + 50) + a[i] = next + } + b := a + + // Compare behavior against non-atomic implementation. Expect the operation + // to work at any byte offset and to not clobber neighboring values. + for i := range a { + next := uint8(i + 100) + pa := atomic.Xchg8(&a[i], next) + pb := b[i] + b[i] = next + if pa != pb { + t.Errorf("atomic.Xchg8(a[%d]); %d != %d", i, pa, pb) + } + if a != b { + t.Errorf("after atomic.Xchg8(a[%d]); %d != %d", i, a, b) + } + if t.Failed() { + break + } + } +} + +func BenchmarkXchg8(b *testing.B) { + var x [512]uint8 // give byte its own cache line + sink = &x + for i := 0; i < b.N; i++ { + atomic.Xchg8(&x[255], uint8(i)) + } +} + +func BenchmarkXchg8Parallel(b *testing.B) { + var x [512]uint8 // give byte its own cache line + sink = &x + b.RunParallel(func(pb *testing.PB) { + i := uint8(0) + for pb.Next() { + atomic.Xchg8(&x[255], i) + i++ + } + }) +} diff --git a/testing/internal/runtime/sys/consts.go b/testing/internal/runtime/sys/consts.go new file mode 100644 index 0000000..dcfeda1 --- /dev/null +++ b/testing/internal/runtime/sys/consts.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +import ( + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goarch" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goos" +) + +// AIX and OpenBSD require a larger stack for syscalls. +// The race build also needs more stack. See issue 54291. +// This arithmetic must match that in cmd/internal/objabi/stack.go:stackGuardMultiplier. +const StackGuardMultiplier = 1 + goos.IsAix + goos.IsOpenbsd + isRace + +// DefaultPhysPageSize is the default physical page size. 
+const DefaultPhysPageSize = goarch.DefaultPhysPageSize + +// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems). +// The various PC tables record PC deltas pre-divided by PCQuantum. +const PCQuantum = goarch.PCQuantum + +// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit). +const Int64Align = goarch.PtrSize + +// MinFrameSize is the size of the system-reserved words at the bottom +// of a frame (just above the architectural stack pointer). +// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems. +// On PowerPC it is larger, to cover three more reserved words: +// the compiler word, the link editor word, and the TOC save word. +const MinFrameSize = goarch.MinFrameSize + +// StackAlign is the required alignment of the SP register. +// The stack must be at least word aligned, but some architectures require more. +const StackAlign = goarch.StackAlign diff --git a/testing/internal/runtime/sys/consts_norace.go b/testing/internal/runtime/sys/consts_norace.go new file mode 100644 index 0000000..a9613b8 --- /dev/null +++ b/testing/internal/runtime/sys/consts_norace.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package sys + +const isRace = 0 diff --git a/testing/internal/runtime/sys/consts_race.go b/testing/internal/runtime/sys/consts_race.go new file mode 100644 index 0000000..f824fb3 --- /dev/null +++ b/testing/internal/runtime/sys/consts_race.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package sys + +const isRace = 1 diff --git a/testing/internal/runtime/sys/dit_arm64.go b/testing/internal/runtime/sys/dit_arm64.go new file mode 100644 index 0000000..b802c8a --- /dev/null +++ b/testing/internal/runtime/sys/dit_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 + +package sys + +import ( + "github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu" +) + +var DITSupported = cpu.ARM64.HasDIT + +func EnableDIT() bool +func DITEnabled() bool +func DisableDIT() diff --git a/testing/internal/runtime/sys/dit_arm64.s b/testing/internal/runtime/sys/dit_arm64.s new file mode 100644 index 0000000..c27dfc9 --- /dev/null +++ b/testing/internal/runtime/sys/dit_arm64.s @@ -0,0 +1,22 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·EnableDIT(SB),$0-1 + MRS DIT, R0 + UBFX $24, R0, $1, R1 + MOVB R1, ret+0(FP) + MSR $1, DIT + RET + +TEXT ·DITEnabled(SB),$0-1 + MRS DIT, R0 + UBFX $24, R0, $1, R1 + MOVB R1, ret+0(FP) + RET + +TEXT ·DisableDIT(SB),$0 + MSR $0, DIT + RET diff --git a/testing/internal/runtime/sys/empty.s b/testing/internal/runtime/sys/empty.s new file mode 100644 index 0000000..3e62b7d --- /dev/null +++ b/testing/internal/runtime/sys/empty.s @@ -0,0 +1 @@ +// Empty assembly file to allow empty function bodies for intrinsics. 
diff --git a/testing/internal/runtime/sys/intrinsics.go b/testing/internal/runtime/sys/intrinsics.go new file mode 100644 index 0000000..147d558 --- /dev/null +++ b/testing/internal/runtime/sys/intrinsics.go @@ -0,0 +1,256 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +// Copied from math/bits to avoid dependence. + +var deBruijn32tab = [32]byte{ + 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9, +} + +const deBruijn32 = 0x077CB531 + +var deBruijn64tab = [64]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +const deBruijn64 = 0x03f79d71b4ca8b09 + +const ntz8tab = "" + + "\x08\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x06\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x07\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x06\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x05\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + + "\x04\x00\x01\x00\x02\x00\x01\x00\x03\x00\x01\x00\x02\x00\x01\x00" + +// TrailingZeros32 returns the number of trailing zero bits in x; the result is 32 for x == 0. +func TrailingZeros32(x uint32) int { + if x == 0 { + return 32 + } + // see comment in TrailingZeros64 + return int(deBruijn32tab[(x&-x)*deBruijn32>>(32-5)]) +} + +// TrailingZeros64 returns the number of trailing zero bits in x; the result is 64 for x == 0. +func TrailingZeros64(x uint64) int { + if x == 0 { + return 64 + } + // If popcount is fast, replace code below with return popcount(^x & (x - 1)). + // + // x & -x leaves only the right-most bit set in the word. Let k be the + // index of that bit. Since only a single bit is set, the value is two + // to the power of k. Multiplying by a power of two is equivalent to + // left shifting, in this case by k bits. The de Bruijn (64 bit) constant + // is such that all six bit, consecutive substrings are distinct. + // Therefore, if we have a left shifted version of this constant we can + // find by how many bits it was shifted by looking at which six bit + // substring ended up at the top of the word. + // (Knuth, volume 4, section 7.3.1) + return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)]) +} + +// TrailingZeros8 returns the number of trailing zero bits in x; the result is 8 for x == 0. 
+func TrailingZeros8(x uint8) int { + return int(ntz8tab[x]) +} + +const len8tab = "" + + "\x00\x01\x02\x02\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x04" + + "\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05\x05" + + "\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06" + + "\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06\x06" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + + "\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08" + +// Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0. +// +// nosplit because this is used in src/runtime/histogram.go, which make run in sensitive contexts. +// +//go:nosplit +func Len64(x uint64) (n int) { + if x >= 1<<32 { + x >>= 32 + n = 32 + } + if x >= 1<<16 { + x >>= 16 + n += 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + return n + int(len8tab[x]) +} + +// --- OnesCount --- + +const m0 = 0x5555555555555555 // 01010101 ... +const m1 = 0x3333333333333333 // 00110011 ... +const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... + +// OnesCount64 returns the number of one bits ("population count") in x. +func OnesCount64(x uint64) int { + // Implementation: Parallel summing of adjacent bits. + // See "Hacker's Delight", Chap. 5: Counting Bits. + // The following pattern shows the general approach: + // + // x = x>>1&(m0&m) + x&(m0&m) + // x = x>>2&(m1&m) + x&(m1&m) + // x = x>>4&(m2&m) + x&(m2&m) + // x = x>>8&(m3&m) + x&(m3&m) + // x = x>>16&(m4&m) + x&(m4&m) + // x = x>>32&(m5&m) + x&(m5&m) + // return int(x) + // + // Masking (& operations) can be left away when there's no + // danger that a field's sum will carry over into the next + // field: Since the result cannot be > 64, 8 bits is enough + // and we can ignore the masks for the shifts by 8 and up. + // Per "Hacker's Delight", the first line can be simplified + // more, but it saves at best one instruction, so we leave + // it alone for clarity. + const m = 1<<64 - 1 + x = x>>1&(m0&m) + x&(m0&m) + x = x>>2&(m1&m) + x&(m1&m) + x = (x>>4 + x) & (m2 & m) + x += x >> 8 + x += x >> 16 + x += x >> 32 + return int(x) & (1<<7 - 1) +} + +// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0. +func LeadingZeros64(x uint64) int { return 64 - Len64(x) } + +// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0. +func LeadingZeros8(x uint8) int { return 8 - Len8(x) } + +// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0. 
+func Len8(x uint8) int { + return int(len8tab[x]) +} + +// Bswap64 returns its input with byte order reversed +// 0x0102030405060708 -> 0x0807060504030201 +func Bswap64(x uint64) uint64 { + c8 := uint64(0x00ff00ff00ff00ff) + a := x >> 8 & c8 + b := (x & c8) << 8 + x = a | b + c16 := uint64(0x0000ffff0000ffff) + a = x >> 16 & c16 + b = (x & c16) << 16 + x = a | b + c32 := uint64(0x00000000ffffffff) + a = x >> 32 & c32 + b = (x & c32) << 32 + x = a | b + return x +} + +// Bswap32 returns its input with byte order reversed +// 0x01020304 -> 0x04030201 +func Bswap32(x uint32) uint32 { + c8 := uint32(0x00ff00ff) + a := x >> 8 & c8 + b := (x & c8) << 8 + x = a | b + c16 := uint32(0x0000ffff) + a = x >> 16 & c16 + b = (x & c16) << 16 + x = a | b + return x +} + +// Prefetch prefetches data from memory addr to cache +// +// AMD64: Produce PREFETCHT0 instruction +// +// ARM64: Produce PRFM instruction with PLDL1KEEP option +func Prefetch(addr uintptr) {} + +// PrefetchStreamed prefetches data from memory addr, with a hint that this data is being streamed. +// That is, it is likely to be accessed very soon, but only once. If possible, this will avoid polluting the cache. +// +// AMD64: Produce PREFETCHNTA instruction +// +// ARM64: Produce PRFM instruction with PLDL1STRM option +func PrefetchStreamed(addr uintptr) {} + +// GetCallerPC returns the program counter (PC) of its caller's caller. +// GetCallerSP returns the stack pointer (SP) of its caller's caller. +// Both are implemented as intrinsics on every platform. +// +// For example: +// +// func f(arg1, arg2, arg3 int) { +// pc := GetCallerPC() +// sp := GetCallerSP() +// } +// +// These two lines find the PC and SP immediately following +// the call to f (where f will return). +// +// The call to GetCallerPC and GetCallerSP must be done in the +// frame being asked about. +// +// The result of GetCallerSP is correct at the time of the return, +// but it may be invalidated by any subsequent call to a function +// that might relocate the stack in order to grow or shrink it. +// A general rule is that the result of GetCallerSP should be used +// immediately and can only be passed to nosplit functions. + +func GetCallerPC() uintptr + +func GetCallerSP() uintptr + +// GetClosurePtr returns the pointer to the current closure. +// GetClosurePtr can only be used in an assignment statement +// at the entry of a function. Moreover, go:nosplit directive +// must be specified at the declaration of caller function, +// so that the function prolog does not clobber the closure register. +// for example: +// +// //go:nosplit +// func f(arg1, arg2, arg3 int) { +// dx := GetClosurePtr() +// } +// +// The compiler rewrites calls to this function into instructions that fetch the +// pointer from a well-known register (DX on x86 architecture, etc.) directly. +// +// WARNING: PGO-based devirtualization cannot detect that caller of +// GetClosurePtr requires closure context, and thus must maintain a list of +// these functions, which is in +// cmd/compile/internal/devirtualize/pgo.maybeDevirtualizeFunctionCall. +func GetClosurePtr() uintptr diff --git a/testing/internal/runtime/sys/intrinsics_test.go b/testing/internal/runtime/sys/intrinsics_test.go new file mode 100644 index 0000000..9761bc7 --- /dev/null +++ b/testing/internal/runtime/sys/intrinsics_test.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package sys_test
+
+import (
+	"testing"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/runtime/sys"
+)
+
+func TestTrailingZeros64(t *testing.T) {
+	for i := 0; i <= 64; i++ {
+		x := uint64(5) << uint(i)
+		if got := sys.TrailingZeros64(x); got != i {
+			t.Errorf("TrailingZeros64(%d)=%d, want %d", x, got, i)
+		}
+	}
+}
+func TestTrailingZeros32(t *testing.T) {
+	for i := 0; i <= 32; i++ {
+		x := uint32(5) << uint(i)
+		if got := sys.TrailingZeros32(x); got != i {
+			t.Errorf("TrailingZeros32(%d)=%d, want %d", x, got, i)
+		}
+	}
+}
+
+func TestBswap64(t *testing.T) {
+	x := uint64(0x1122334455667788)
+	y := sys.Bswap64(x)
+	if y != 0x8877665544332211 {
+		t.Errorf("Bswap(%x)=%x, want 0x8877665544332211", x, y)
+	}
+}
+func TestBswap32(t *testing.T) {
+	x := uint32(0x11223344)
+	y := sys.Bswap32(x)
+	if y != 0x44332211 {
+		t.Errorf("Bswap(%x)=%x, want 0x44332211", x, y)
+	}
+}
diff --git a/testing/internal/runtime/sys/nih.go b/testing/internal/runtime/sys/nih.go
new file mode 100644
index 0000000..a9cbc48
--- /dev/null
+++ b/testing/internal/runtime/sys/nih.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// NOTE: keep in sync with cmd/compile/internal/types.CalcSize
+// to make the compiler recognize this as an intrinsic type.
+type nih struct{}
+
+// NotInHeap is a type that must never be allocated from the GC'd heap or on the stack,
+// and is called not-in-heap.
+//
+// Other types can embed NotInHeap to make them not-in-heap. Specifically, pointers
+// to these types must always fail the `runtime.inheap` check. The type may be used
+// for global variables, or for objects in unmanaged memory (e.g., allocated with
+// `sysAlloc`, `persistentalloc`, `fixalloc`, or from a manually-managed span).
+//
+// Specifically:
+//
+// 1. `new(T)`, `make([]T)`, `append([]T, ...)` and implicit heap
+// allocation of T are disallowed. (Though implicit allocations are
+// disallowed in the runtime anyway.)
+//
+// 2. A pointer to a regular type (other than `unsafe.Pointer`) cannot be
+// converted to a pointer to a not-in-heap type, even if they have the
+// same underlying type.
+//
+// 3. Any type containing a not-in-heap type is itself considered not-in-heap.
+//
+//   - Structs and arrays are not-in-heap if their elements are not-in-heap.
+//   - Maps and channels containing not-in-heap types are disallowed.
+//
+// 4. Write barriers on pointers to not-in-heap types can be omitted.
+//
+// The last point is the real benefit of NotInHeap. The runtime uses
+// it for low-level internal structures to avoid memory barriers in the
+// scheduler and the memory allocator where they are illegal or simply
+// inefficient. This mechanism is reasonably safe and does not compromise
+// the readability of the runtime.
+type NotInHeap struct{ _ nih }
diff --git a/testing/internal/runtime/sys/no_dit.go b/testing/internal/runtime/sys/no_dit.go
new file mode 100644
index 0000000..0589d0c
--- /dev/null
+++ b/testing/internal/runtime/sys/no_dit.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
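+
+// DIT (Data Independent Timing) is an arm64 CPU feature; this file provides
+// the stubs used on every other architecture, which simply report it as
+// unsupported.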
+ +//go:build !arm64 + +package sys + +var DITSupported = false + +func EnableDIT() bool { return false } +func DITEnabled() bool { return false } +func DisableDIT() {} diff --git a/testing/internal/runtime/sys/sys.go b/testing/internal/runtime/sys/sys.go new file mode 100644 index 0000000..694101d --- /dev/null +++ b/testing/internal/runtime/sys/sys.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package sys contains system- and configuration- and architecture-specific +// constants used by the runtime. +package sys diff --git a/testing/internal/synctest/synctest.go b/testing/internal/synctest/synctest.go new file mode 100644 index 0000000..19190d3 --- /dev/null +++ b/testing/internal/synctest/synctest.go @@ -0,0 +1,63 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package synctest provides support for testing concurrent code. +// +// See the testing/synctest package for function documentation. +package synctest + +import ( + _ "unsafe" // for go:linkname +) + +//go:linkname Run +func Run(f func()) + +//go:linkname Wait +func Wait() + +//go:linkname acquire +func acquire() any + +//go:linkname release +func release(any) + +//go:linkname inBubble +func inBubble(any, func()) + +// A Bubble is a synctest bubble. +// +// Not a public API. Used by syscall/js to propagate bubble membership through syscalls. +type Bubble struct { + b any +} + +// Acquire returns a reference to the current goroutine's bubble. +// The bubble will not become idle until Release is called. +func Acquire() *Bubble { + if b := acquire(); b != nil { + return &Bubble{b} + } + return nil +} + +// Release releases the reference to the bubble, +// allowing it to become idle again. +func (b *Bubble) Release() { + if b == nil { + return + } + release(b.b) + b.b = nil +} + +// Run executes f in the bubble. +// The current goroutine must not be part of a bubble. +func (b *Bubble) Run(f func()) { + if b == nil { + f() + } else { + inBubble(b.b, f) + } +} diff --git a/testing/internal/synctest/synctest_test.go b/testing/internal/synctest/synctest_test.go new file mode 100644 index 0000000..5dcf487 --- /dev/null +++ b/testing/internal/synctest/synctest_test.go @@ -0,0 +1,445 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package synctest_test + +import ( + "fmt" + "iter" + "reflect" + "slices" + "strconv" + "sync" + "testing" + "time" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/synctest" +) + +func TestNow(t *testing.T) { + start := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).In(time.Local) + synctest.Run(func() { + // Time starts at 2000-1-1 00:00:00. + if got, want := time.Now(), start; !got.Equal(want) { + t.Errorf("at start: time.Now = %v, want %v", got, want) + } + go func() { + // New goroutines see the same fake clock. + if got, want := time.Now(), start; !got.Equal(want) { + t.Errorf("time.Now = %v, want %v", got, want) + } + }() + // Time advances after a sleep. 
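+		// (Inside the bubble, Sleep advances the fake clock as soon as every
+		// goroutine in the bubble is durably blocked.)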
+ time.Sleep(1 * time.Second) + if got, want := time.Now(), start.Add(1*time.Second); !got.Equal(want) { + t.Errorf("after sleep: time.Now = %v, want %v", got, want) + } + }) +} + +func TestRunEmpty(t *testing.T) { + synctest.Run(func() { + }) +} + +func TestSimpleWait(t *testing.T) { + synctest.Run(func() { + synctest.Wait() + }) +} + +func TestGoroutineWait(t *testing.T) { + synctest.Run(func() { + go func() {}() + synctest.Wait() + }) +} + +// TestWait starts a collection of goroutines. +// It checks that synctest.Wait waits for all goroutines to exit before returning. +func TestWait(t *testing.T) { + synctest.Run(func() { + done := false + ch := make(chan int) + var f func() + f = func() { + count := <-ch + if count == 0 { + done = true + } else { + go f() + ch <- count - 1 + } + } + go f() + ch <- 100 + synctest.Wait() + if !done { + t.Fatalf("done = false, want true") + } + }) +} + +func TestMallocs(t *testing.T) { + for i := 0; i < 100; i++ { + synctest.Run(func() { + done := false + ch := make(chan []byte) + var f func() + f = func() { + b := <-ch + if len(b) == 0 { + done = true + } else { + go f() + ch <- make([]byte, len(b)-1) + } + } + go f() + ch <- make([]byte, 100) + synctest.Wait() + if !done { + t.Fatalf("done = false, want true") + } + }) + } +} + +func TestTimerReadBeforeDeadline(t *testing.T) { + synctest.Run(func() { + start := time.Now() + tm := time.NewTimer(5 * time.Second) + <-tm.C + if got, want := time.Since(start), 5*time.Second; got != want { + t.Errorf("after sleep: time.Since(start) = %v, want %v", got, want) + } + }) +} + +func TestTimerReadAfterDeadline(t *testing.T) { + synctest.Run(func() { + delay := 1 * time.Second + want := time.Now().Add(delay) + tm := time.NewTimer(delay) + time.Sleep(2 * delay) + got := <-tm.C + if got != want { + t.Errorf("<-tm.C = %v, want %v", got, want) + } + }) +} + +func TestTimerReset(t *testing.T) { + synctest.Run(func() { + start := time.Now() + tm := time.NewTimer(1 * time.Second) + if got, want := <-tm.C, start.Add(1*time.Second); got != want { + t.Errorf("first sleep: <-tm.C = %v, want %v", got, want) + } + + tm.Reset(2 * time.Second) + if got, want := <-tm.C, start.Add((1+2)*time.Second); got != want { + t.Errorf("second sleep: <-tm.C = %v, want %v", got, want) + } + + tm.Reset(3 * time.Second) + time.Sleep(1 * time.Second) + tm.Reset(3 * time.Second) + if got, want := <-tm.C, start.Add((1+2+4)*time.Second); got != want { + t.Errorf("third sleep: <-tm.C = %v, want %v", got, want) + } + }) +} + +func TestTimeAfter(t *testing.T) { + synctest.Run(func() { + i := 0 + time.AfterFunc(1*time.Second, func() { + // Ensure synctest group membership propagates through the AfterFunc. 
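+			// Both this increment and the goroutine spawned below run inside
+			// the bubble, so the synctest.Wait further down observes i == 2.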
+ i++ // 1 + go func() { + time.Sleep(1 * time.Second) + i++ // 2 + }() + }) + time.Sleep(3 * time.Second) + synctest.Wait() + if got, want := i, 2; got != want { + t.Errorf("after sleep and wait: i = %v, want %v", got, want) + } + }) +} + +func TestTimerFromOutsideBubble(t *testing.T) { + tm := time.NewTimer(10 * time.Millisecond) + synctest.Run(func() { + <-tm.C + }) + if tm.Stop() { + t.Errorf("synctest.Run unexpectedly returned before timer fired") + } +} + +func TestChannelFromOutsideBubble(t *testing.T) { + choutside := make(chan struct{}) + for _, test := range []struct { + desc string + outside func(ch chan int) + inside func(ch chan int) + }{{ + desc: "read closed", + outside: func(ch chan int) { close(ch) }, + inside: func(ch chan int) { <-ch }, + }, { + desc: "read value", + outside: func(ch chan int) { ch <- 0 }, + inside: func(ch chan int) { <-ch }, + }, { + desc: "write value", + outside: func(ch chan int) { <-ch }, + inside: func(ch chan int) { ch <- 0 }, + }, { + desc: "select outside only", + outside: func(ch chan int) { close(ch) }, + inside: func(ch chan int) { + select { + case <-ch: + case <-choutside: + } + }, + }, { + desc: "select mixed", + outside: func(ch chan int) { close(ch) }, + inside: func(ch chan int) { + ch2 := make(chan struct{}) + select { + case <-ch: + case <-ch2: + } + }, + }} { + t.Run(test.desc, func(t *testing.T) { + ch := make(chan int) + time.AfterFunc(1*time.Millisecond, func() { + test.outside(ch) + }) + synctest.Run(func() { + test.inside(ch) + }) + }) + } +} + +func TestTimerFromInsideBubble(t *testing.T) { + for _, test := range []struct { + desc string + f func(tm *time.Timer) + wantPanic string + }{{ + desc: "read channel", + f: func(tm *time.Timer) { + <-tm.C + }, + wantPanic: "receive on synctest channel from outside bubble", + }, { + desc: "Reset", + f: func(tm *time.Timer) { + tm.Reset(1 * time.Second) + }, + wantPanic: "reset of synctest timer from outside bubble", + }, { + desc: "Stop", + f: func(tm *time.Timer) { + tm.Stop() + }, + wantPanic: "stop of synctest timer from outside bubble", + }} { + t.Run(test.desc, func(t *testing.T) { + donec := make(chan struct{}) + ch := make(chan *time.Timer) + go func() { + defer close(donec) + defer wantPanic(t, test.wantPanic) + test.f(<-ch) + }() + synctest.Run(func() { + tm := time.NewTimer(1 * time.Second) + ch <- tm + }) + <-donec + }) + } +} + +func TestDeadlockRoot(t *testing.T) { + defer wantPanic(t, "deadlock: all goroutines in bubble are blocked") + synctest.Run(func() { + select {} + }) +} + +func TestDeadlockChild(t *testing.T) { + defer wantPanic(t, "deadlock: all goroutines in bubble are blocked") + synctest.Run(func() { + go func() { + select {} + }() + }) +} + +func TestCond(t *testing.T) { + synctest.Run(func() { + var mu sync.Mutex + cond := sync.NewCond(&mu) + start := time.Now() + const waitTime = 1 * time.Millisecond + + go func() { + // Signal the cond. + time.Sleep(waitTime) + mu.Lock() + cond.Signal() + mu.Unlock() + + // Broadcast to the cond. + time.Sleep(waitTime) + mu.Lock() + cond.Broadcast() + mu.Unlock() + }() + + // Wait for cond.Signal. + mu.Lock() + cond.Wait() + mu.Unlock() + if got, want := time.Since(start), waitTime; got != want { + t.Errorf("after cond.Signal: time elapsed = %v, want %v", got, want) + } + + // Wait for cond.Broadcast in two goroutines. 
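+		// One of the waiters runs on a separate goroutine, so synctest.Wait is
+		// needed below before waiterDone is checked.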
+ waiterDone := false + go func() { + mu.Lock() + cond.Wait() + mu.Unlock() + waiterDone = true + }() + mu.Lock() + cond.Wait() + mu.Unlock() + synctest.Wait() + if !waiterDone { + t.Errorf("after cond.Broadcast: waiter not done") + } + if got, want := time.Since(start), 2*waitTime; got != want { + t.Errorf("after cond.Broadcast: time elapsed = %v, want %v", got, want) + } + }) +} + +func TestIteratorPush(t *testing.T) { + synctest.Run(func() { + seq := func(yield func(time.Time) bool) { + for yield(time.Now()) { + time.Sleep(1 * time.Second) + } + } + var got []time.Time + go func() { + for now := range seq { + got = append(got, now) + if len(got) >= 3 { + break + } + } + }() + want := []time.Time{ + time.Now(), + time.Now().Add(1 * time.Second), + time.Now().Add(2 * time.Second), + } + time.Sleep(5 * time.Second) + synctest.Wait() + if !slices.Equal(got, want) { + t.Errorf("got: %v; want: %v", got, want) + } + }) +} + +func TestIteratorPull(t *testing.T) { + synctest.Run(func() { + seq := func(yield func(time.Time) bool) { + for yield(time.Now()) { + time.Sleep(1 * time.Second) + } + } + var got []time.Time + go func() { + next, stop := iter.Pull(seq) + defer stop() + for len(got) < 3 { + now, _ := next() + got = append(got, now) + } + }() + want := []time.Time{ + time.Now(), + time.Now().Add(1 * time.Second), + time.Now().Add(2 * time.Second), + } + time.Sleep(5 * time.Second) + synctest.Wait() + if !slices.Equal(got, want) { + t.Errorf("got: %v; want: %v", got, want) + } + }) +} + +func TestReflectFuncOf(t *testing.T) { + mkfunc := func(name string, i int) { + reflect.FuncOf([]reflect.Type{ + reflect.StructOf([]reflect.StructField{{ + Name: name + strconv.Itoa(i), + Type: reflect.TypeOf(0), + }}), + }, nil, false) + } + go func() { + for i := 0; i < 100000; i++ { + mkfunc("A", i) + } + }() + synctest.Run(func() { + for i := 0; i < 100000; i++ { + mkfunc("A", i) + } + }) +} + +func TestWaitGroup(t *testing.T) { + synctest.Run(func() { + var wg sync.WaitGroup + wg.Add(1) + const delay = 1 * time.Second + go func() { + time.Sleep(delay) + wg.Done() + }() + start := time.Now() + wg.Wait() + if got := time.Since(start); got != delay { + t.Fatalf("WaitGroup.Wait() took %v, want %v", got, delay) + } + }) +} + +func wantPanic(t *testing.T, want string) { + if e := recover(); e != nil { + if got := fmt.Sprint(e); got != want { + t.Errorf("got panic message %q, want %q", got, want) + } + } else { + t.Errorf("got no panic, want one") + } +} diff --git a/testing/internal/syscall/windows/at_windows.go b/testing/internal/syscall/windows/at_windows.go new file mode 100644 index 0000000..05170b2 --- /dev/null +++ b/testing/internal/syscall/windows/at_windows.go @@ -0,0 +1,253 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +// Openat flags not supported by syscall.Open. +// +// These are invented values. +// +// When adding a new flag here, add an unexported version to +// the set of invented O_ values in syscall/types_windows.go +// to avoid overlap. 
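+//
+// A hypothetical call combining them with standard flags (dirfd and the
+// path are illustrative, not from the original source):
+//
+//	h, err := Openat(dirfd, "subdir", O_DIRECTORY|syscall.O_RDONLY, 0)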
+const ( + O_DIRECTORY = 0x100000 // target must be a directory + O_NOFOLLOW_ANY = 0x20000000 // disallow symlinks anywhere in the path + O_OPEN_REPARSE = 0x40000000 // FILE_OPEN_REPARSE_POINT, used by Lstat +) + +func Openat(dirfd syscall.Handle, name string, flag int, perm uint32) (_ syscall.Handle, e1 error) { + if len(name) == 0 { + return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND + } + + var access, options uint32 + switch flag & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + // FILE_GENERIC_READ includes FILE_LIST_DIRECTORY. + access = FILE_GENERIC_READ + case syscall.O_WRONLY: + access = FILE_GENERIC_WRITE + options |= FILE_NON_DIRECTORY_FILE + case syscall.O_RDWR: + access = FILE_GENERIC_READ | FILE_GENERIC_WRITE + options |= FILE_NON_DIRECTORY_FILE + default: + // Stat opens files without requesting read or write permissions, + // but we still need to request SYNCHRONIZE. + access = SYNCHRONIZE + } + if flag&syscall.O_CREAT != 0 { + access |= FILE_GENERIC_WRITE + } + if flag&syscall.O_APPEND != 0 { + access |= FILE_APPEND_DATA + // Remove FILE_WRITE_DATA access unless O_TRUNC is set, + // in which case we need it to truncate the file. + if flag&syscall.O_TRUNC == 0 { + access &^= FILE_WRITE_DATA + } + } + if flag&O_DIRECTORY != 0 { + options |= FILE_DIRECTORY_FILE + access |= FILE_LIST_DIRECTORY + } + if flag&syscall.O_SYNC != 0 { + options |= FILE_WRITE_THROUGH + } + // Allow File.Stat. + access |= STANDARD_RIGHTS_READ | FILE_READ_ATTRIBUTES | FILE_READ_EA + + objAttrs := &OBJECT_ATTRIBUTES{} + if flag&O_NOFOLLOW_ANY != 0 { + objAttrs.Attributes |= OBJ_DONT_REPARSE + } + if flag&syscall.O_CLOEXEC == 0 { + objAttrs.Attributes |= OBJ_INHERIT + } + if err := objAttrs.init(dirfd, name); err != nil { + return syscall.InvalidHandle, err + } + + if flag&O_OPEN_REPARSE != 0 { + options |= FILE_OPEN_REPARSE_POINT + } + + // We don't use FILE_OVERWRITE/FILE_OVERWRITE_IF, because when opening + // a file with FILE_ATTRIBUTE_READONLY these will replace an existing + // file with a new, read-only one. + // + // Instead, we ftruncate the file after opening when O_TRUNC is set. + var disposition uint32 + switch { + case flag&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + disposition = FILE_CREATE + options |= FILE_OPEN_REPARSE_POINT // don't follow symlinks + case flag&syscall.O_CREAT == syscall.O_CREAT: + disposition = FILE_OPEN_IF + default: + disposition = FILE_OPEN + } + + fileAttrs := uint32(FILE_ATTRIBUTE_NORMAL) + if perm&syscall.S_IWRITE == 0 { + fileAttrs = FILE_ATTRIBUTE_READONLY + } + + var h syscall.Handle + err := NtCreateFile( + &h, + SYNCHRONIZE|access, + objAttrs, + &IO_STATUS_BLOCK{}, + nil, + fileAttrs, + FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, + disposition, + FILE_SYNCHRONOUS_IO_NONALERT|FILE_OPEN_FOR_BACKUP_INTENT|options, + 0, + 0, + ) + if err != nil { + return h, ntCreateFileError(err, flag) + } + + if flag&syscall.O_TRUNC != 0 { + err = syscall.Ftruncate(h, 0) + if err != nil { + syscall.CloseHandle(h) + return syscall.InvalidHandle, err + } + } + + return h, nil +} + +// ntCreateFileError maps error returns from NTCreateFile to user-visible errors. +func ntCreateFileError(err error, flag int) error { + s, ok := err.(NTStatus) + if !ok { + // Shouldn't really be possible, NtCreateFile always returns NTStatus. 
+ return err + } + switch s { + case STATUS_REPARSE_POINT_ENCOUNTERED: + return syscall.ELOOP + case STATUS_NOT_A_DIRECTORY: + // ENOTDIR is the errno returned by open when O_DIRECTORY is specified + // and the target is not a directory. + // + // NtCreateFile can return STATUS_NOT_A_DIRECTORY under other circumstances, + // such as when opening "file/" where "file" is not a directory. + // (This might be Windows version dependent.) + // + // Only map STATUS_NOT_A_DIRECTORY to ENOTDIR when O_DIRECTORY is specified. + if flag&O_DIRECTORY != 0 { + return syscall.ENOTDIR + } + case STATUS_FILE_IS_A_DIRECTORY: + return syscall.EISDIR + } + return s.Errno() +} + +func Mkdirat(dirfd syscall.Handle, name string, mode uint32) error { + objAttrs := &OBJECT_ATTRIBUTES{} + if err := objAttrs.init(dirfd, name); err != nil { + return err + } + var h syscall.Handle + err := NtCreateFile( + &h, + FILE_GENERIC_READ, + objAttrs, + &IO_STATUS_BLOCK{}, + nil, + syscall.FILE_ATTRIBUTE_NORMAL, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + FILE_CREATE, + FILE_DIRECTORY_FILE, + 0, + 0, + ) + if err != nil { + return ntCreateFileError(err, 0) + } + syscall.CloseHandle(h) + return nil +} + +func Deleteat(dirfd syscall.Handle, name string) error { + objAttrs := &OBJECT_ATTRIBUTES{} + if err := objAttrs.init(dirfd, name); err != nil { + return err + } + var h syscall.Handle + err := NtOpenFile( + &h, + DELETE, + objAttrs, + &IO_STATUS_BLOCK{}, + FILE_SHARE_DELETE|FILE_SHARE_READ|FILE_SHARE_WRITE, + FILE_OPEN_REPARSE_POINT|FILE_OPEN_FOR_BACKUP_INTENT, + ) + if err != nil { + return ntCreateFileError(err, 0) + } + defer syscall.CloseHandle(h) + + const ( + FileDispositionInformation = 13 + FileDispositionInformationEx = 64 + ) + + // First, attempt to delete the file using POSIX semantics + // (which permit a file to be deleted while it is still open). + // This matches the behavior of DeleteFileW. + err = NtSetInformationFile( + h, + &IO_STATUS_BLOCK{}, + uintptr(unsafe.Pointer(&FILE_DISPOSITION_INFORMATION_EX{ + Flags: FILE_DISPOSITION_DELETE | + FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK | + FILE_DISPOSITION_POSIX_SEMANTICS | + // This differs from DeleteFileW, but matches os.Remove's + // behavior on Unix platforms of permitting deletion of + // read-only files. + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE, + })), + uint32(unsafe.Sizeof(FILE_DISPOSITION_INFORMATION_EX{})), + FileDispositionInformationEx, + ) + switch err { + case nil: + return nil + case STATUS_CANNOT_DELETE, STATUS_DIRECTORY_NOT_EMPTY: + return err.(NTStatus).Errno() + } + + // If the prior deletion failed, the filesystem either doesn't support + // POSIX semantics (for example, FAT), or hasn't implemented + // FILE_DISPOSITION_INFORMATION_EX. + // + // Try again. + err = NtSetInformationFile( + h, + &IO_STATUS_BLOCK{}, + uintptr(unsafe.Pointer(&FILE_DISPOSITION_INFORMATION{ + DeleteFile: true, + })), + uint32(unsafe.Sizeof(FILE_DISPOSITION_INFORMATION{})), + FileDispositionInformation, + ) + if st, ok := err.(NTStatus); ok { + return st.Errno() + } + return err +} diff --git a/testing/internal/syscall/windows/at_windows_test.go b/testing/internal/syscall/windows/at_windows_test.go new file mode 100644 index 0000000..26b42a9 --- /dev/null +++ b/testing/internal/syscall/windows/at_windows_test.go @@ -0,0 +1,59 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows_test + +import ( + "os" + "path/filepath" + "syscall" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows" +) + +func TestOpen(t *testing.T) { + t.Parallel() + + dir := t.TempDir() + file := filepath.Join(dir, "a") + f, err := os.Create(file) + if err != nil { + t.Fatal(err) + } + f.Close() + + tests := []struct { + path string + flag int + err error + }{ + {dir, syscall.O_RDONLY, nil}, + {dir, syscall.O_CREAT, nil}, + {dir, syscall.O_RDONLY | syscall.O_CREAT, nil}, + {file, syscall.O_APPEND | syscall.O_WRONLY | os.O_CREATE, nil}, + {file, syscall.O_APPEND | syscall.O_WRONLY | os.O_CREATE | os.O_TRUNC, nil}, + {dir, syscall.O_RDONLY | syscall.O_TRUNC, syscall.ERROR_ACCESS_DENIED}, + {dir, syscall.O_WRONLY | syscall.O_RDWR, nil}, // TODO: syscall.Open returns EISDIR here, we should reconcile this + {dir, syscall.O_WRONLY, syscall.EISDIR}, + {dir, syscall.O_RDWR, syscall.EISDIR}, + } + for i, tt := range tests { + dir := filepath.Dir(tt.path) + dirfd, err := syscall.Open(dir, syscall.O_RDONLY, 0) + if err != nil { + t.Error(err) + continue + } + base := filepath.Base(tt.path) + h, err := windows.Openat(dirfd, base, tt.flag, 0o660) + syscall.CloseHandle(dirfd) + if err == nil { + syscall.CloseHandle(h) + } + if err != tt.err { + t.Errorf("%d: Open got %q, want %q", i, err, tt.err) + } + } +} diff --git a/testing/internal/syscall/windows/exec_windows_test.go b/testing/internal/syscall/windows/exec_windows_test.go new file mode 100644 index 0000000..59fc70d --- /dev/null +++ b/testing/internal/syscall/windows/exec_windows_test.go @@ -0,0 +1,140 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package windows_test + +import ( + "fmt" + "os" + "os/exec" + "syscall" + "testing" + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows" +) + +func TestRunAtLowIntegrity(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + wil, err := getProcessIntegrityLevel() + if err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err.Error()) + os.Exit(9) + return + } + fmt.Printf("%s", wil) + os.Exit(0) + return + } + + cmd := exec.Command(os.Args[0], "-test.run=^TestRunAtLowIntegrity$", "--") + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + + token, err := getIntegrityLevelToken(sidWilLow) + if err != nil { + t.Fatal(err) + } + defer token.Close() + + cmd.SysProcAttr = &syscall.SysProcAttr{ + Token: token, + } + + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + + if string(out) != sidWilLow { + t.Fatalf("Child process did not run as low integrity level: %s", string(out)) + } +} + +const ( + sidWilLow = `S-1-16-4096` +) + +func getProcessIntegrityLevel() (string, error) { + procToken, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "", err + } + defer procToken.Close() + + p, err := tokenGetInfo(procToken, syscall.TokenIntegrityLevel, 64) + if err != nil { + return "", err + } + + tml := (*windows.TOKEN_MANDATORY_LABEL)(p) + + sid := (*syscall.SID)(unsafe.Pointer(tml.Label.Sid)) + + return sid.String() +} + +func tokenGetInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != syscall.ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +func getIntegrityLevelToken(wns string) (syscall.Token, error) { + var procToken, token syscall.Token + + proc, err := syscall.GetCurrentProcess() + if err != nil { + return 0, err + } + defer syscall.CloseHandle(proc) + + err = syscall.OpenProcessToken(proc, + syscall.TOKEN_DUPLICATE| + syscall.TOKEN_ADJUST_DEFAULT| + syscall.TOKEN_QUERY| + syscall.TOKEN_ASSIGN_PRIMARY, + &procToken) + if err != nil { + return 0, err + } + defer procToken.Close() + + sid, err := syscall.StringToSid(wns) + if err != nil { + return 0, err + } + + tml := &windows.TOKEN_MANDATORY_LABEL{} + tml.Label.Attributes = windows.SE_GROUP_INTEGRITY + tml.Label.Sid = sid + + err = windows.DuplicateTokenEx(procToken, 0, nil, windows.SecurityImpersonation, + windows.TokenPrimary, &token) + if err != nil { + return 0, err + } + + err = windows.SetTokenInformation(token, + syscall.TokenIntegrityLevel, + uintptr(unsafe.Pointer(tml)), + tml.Size()) + if err != nil { + token.Close() + return 0, err + } + return token, nil +} diff --git a/testing/internal/syscall/windows/memory_windows.go b/testing/internal/syscall/windows/memory_windows.go new file mode 100644 index 0000000..8fb34cf --- /dev/null +++ b/testing/internal/syscall/windows/memory_windows.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type MemoryBasicInformation struct { + // A pointer to the base address of the region of pages. + BaseAddress uintptr + // A pointer to the base address of a range of pages allocated by the VirtualAlloc function. 
+ // The page pointed to by the BaseAddress member is contained within this allocation range. + AllocationBase uintptr + // The memory protection option when the region was initially allocated + AllocationProtect uint32 + PartitionId uint16 + // The size of the region beginning at the base address in which all pages have identical attributes, in bytes. + RegionSize uintptr + // The state of the pages in the region. + State uint32 + // The access protection of the pages in the region. + Protect uint32 + // The type of pages in the region. + Type uint32 +} diff --git a/testing/internal/syscall/windows/mksyscall.go b/testing/internal/syscall/windows/mksyscall.go new file mode 100644 index 0000000..f97ab52 --- /dev/null +++ b/testing/internal/syscall/windows/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package windows + +//go:generate go run ../../../syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go symlink_windows.go version_windows.go diff --git a/testing/internal/syscall/windows/net_windows.go b/testing/internal/syscall/windows/net_windows.go new file mode 100644 index 0000000..9fa5ecf --- /dev/null +++ b/testing/internal/syscall/windows/net_windows.go @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + _ "unsafe" +) + +//go:linkname WSASendtoInet4 syscall.wsaSendtoInet4 +//go:noescape +func WSASendtoInet4(s syscall.Handle, bufs *syscall.WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *syscall.SockaddrInet4, overlapped *syscall.Overlapped, croutine *byte) (err error) + +//go:linkname WSASendtoInet6 syscall.wsaSendtoInet6 +//go:noescape +func WSASendtoInet6(s syscall.Handle, bufs *syscall.WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *syscall.SockaddrInet6, overlapped *syscall.Overlapped, croutine *byte) (err error) + +const ( + SIO_TCP_INITIAL_RTO = syscall.IOC_IN | syscall.IOC_VENDOR | 17 + TCP_INITIAL_RTO_UNSPECIFIED_RTT = ^uint16(0) + TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS = ^uint8(1) +) + +type TCP_INITIAL_RTO_PARAMETERS struct { + Rtt uint16 + MaxSynRetransmissions uint8 +} diff --git a/testing/internal/syscall/windows/psapi_windows.go b/testing/internal/syscall/windows/psapi_windows.go new file mode 100644 index 0000000..b138e65 --- /dev/null +++ b/testing/internal/syscall/windows/psapi_windows.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr +} + +//sys GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) = psapi.GetProcessMemoryInfo diff --git a/testing/internal/syscall/windows/registry/export_test.go b/testing/internal/syscall/windows/registry/export_test.go new file mode 100644 index 0000000..7f1ac70 --- /dev/null +++ b/testing/internal/syscall/windows/registry/export_test.go @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +func (k Key) SetValue(name string, valtype uint32, data []byte) error { + return k.setValue(name, valtype, data) +} diff --git a/testing/internal/syscall/windows/registry/key.go b/testing/internal/syscall/windows/registry/key.go new file mode 100644 index 0000000..b95fa8d --- /dev/null +++ b/testing/internal/syscall/windows/registry/key.go @@ -0,0 +1,168 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +// +// NOTE: This package is a copy of golang.org/x/sys/windows/registry +// with KeyInfo.ModTime removed to prevent dependency cycles. +package registry + +import ( + "runtime" + "syscall" +) + +const ( + // Registry key security and access rights. + // See https://learn.microsoft.com/en-us/windows/win32/sysinfo/registry-key-security-and-access-rights + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) +) + +// Close closes open key k. 
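+// As in the package example above, callers typically defer it right after a
+// successful OpenKey (illustrative sketch):
+//
+//	k, err := OpenKey(CURRENT_USER, `Software`, QUERY_VALUE)
+//	if err != nil {
+//		return err
+//	}
+//	defer k.Close()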
+func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +func (k Key) ReadSubKeyNames() ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See #49320. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://learn.microsoft.com/en-us/windows/win32/sysinfo/registry-element-size-limits + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// Stat retrieves information about the open key k. 
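+// For example (illustrative):
+//
+//	ki, err := k.Stat()
+//	if err == nil {
+//		fmt.Println(ki.SubKeyCount, ki.ValueCount)
+//	}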
+func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/testing/internal/syscall/windows/registry/mksyscall.go b/testing/internal/syscall/windows/registry/mksyscall.go new file mode 100644 index 0000000..0e0b421 --- /dev/null +++ b/testing/internal/syscall/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run ../../../../syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/testing/internal/syscall/windows/registry/registry_test.go b/testing/internal/syscall/windows/registry/registry_test.go new file mode 100644 index 0000000..661b9ae --- /dev/null +++ b/testing/internal/syscall/windows/registry/registry_test.go @@ -0,0 +1,666 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry_test + +import ( + "bytes" + "crypto/rand" + "os" + "syscall" + "testing" + "unsafe" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows/registry" +) + +func randKeyName(prefix string) string { + const numbers = "0123456789" + buf := make([]byte, 10) + rand.Read(buf) + for i, b := range buf { + buf[i] = numbers[b%byte(len(numbers))] + } + return prefix + string(buf) +} + +func TestReadSubKeyNames(t *testing.T) { + k, err := registry.OpenKey(registry.CLASSES_ROOT, "TypeLib", registry.ENUMERATE_SUB_KEYS) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + names, err := k.ReadSubKeyNames() + if err != nil { + t.Fatal(err) + } + var foundStdOle bool + for _, name := range names { + // Every PC has "stdole 2.0 OLE Automation" library installed. 
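+		// ({00020430-0000-0000-C000-000000000046} is the registry entry the
+		// test expects for that library.)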
+ if name == "{00020430-0000-0000-C000-000000000046}" { + foundStdOle = true + } + } + if !foundStdOle { + t.Fatal("could not find stdole 2.0 OLE Automation") + } +} + +func TestCreateOpenDeleteKey(t *testing.T) { + k, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + testKName := randKeyName("TestCreateOpenDeleteKey_") + + testK, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY) + if err != nil { + t.Fatal(err) + } + defer testK.Close() + + if exist { + t.Fatalf("key %q already exists", testKName) + } + + testKAgain, exist, err := registry.CreateKey(k, testKName, registry.CREATE_SUB_KEY) + if err != nil { + t.Fatal(err) + } + defer testKAgain.Close() + + if !exist { + t.Fatalf("key %q should already exist", testKName) + } + + testKOpened, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS) + if err != nil { + t.Fatal(err) + } + defer testKOpened.Close() + + err = registry.DeleteKey(k, testKName) + if err != nil { + t.Fatal(err) + } + + testKOpenedAgain, err := registry.OpenKey(k, testKName, registry.ENUMERATE_SUB_KEYS) + if err == nil { + defer testKOpenedAgain.Close() + t.Fatalf("key %q should already been deleted", testKName) + } + if err != registry.ErrNotExist { + t.Fatalf(`unexpected error ("not exist" expected): %v`, err) + } +} + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + if a == nil { + return true + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +type ValueTest struct { + Type uint32 + Name string + Value any + WillFail bool +} + +var ValueTests = []ValueTest{ + {Type: registry.SZ, Name: "String1", Value: ""}, + {Type: registry.SZ, Name: "String2", Value: "\000", WillFail: true}, + {Type: registry.SZ, Name: "String3", Value: "Hello World"}, + {Type: registry.SZ, Name: "String4", Value: "Hello World\000", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString1", Value: ""}, + {Type: registry.EXPAND_SZ, Name: "ExpString2", Value: "\000", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString3", Value: "Hello World"}, + {Type: registry.EXPAND_SZ, Name: "ExpString4", Value: "Hello\000World", WillFail: true}, + {Type: registry.EXPAND_SZ, Name: "ExpString5", Value: "%PATH%"}, + {Type: registry.EXPAND_SZ, Name: "ExpString6", Value: "%NO_SUCH_VARIABLE%"}, + {Type: registry.EXPAND_SZ, Name: "ExpString7", Value: "%PATH%;."}, + {Type: registry.BINARY, Name: "Binary1", Value: []byte{}}, + {Type: registry.BINARY, Name: "Binary2", Value: []byte{1, 2, 3}}, + {Type: registry.BINARY, Name: "Binary3", Value: []byte{3, 2, 1, 0, 1, 2, 3}}, + {Type: registry.DWORD, Name: "Dword1", Value: uint64(0)}, + {Type: registry.DWORD, Name: "Dword2", Value: uint64(1)}, + {Type: registry.DWORD, Name: "Dword3", Value: uint64(0xff)}, + {Type: registry.DWORD, Name: "Dword4", Value: uint64(0xffff)}, + {Type: registry.QWORD, Name: "Qword1", Value: uint64(0)}, + {Type: registry.QWORD, Name: "Qword2", Value: uint64(1)}, + {Type: registry.QWORD, Name: "Qword3", Value: uint64(0xff)}, + {Type: registry.QWORD, Name: "Qword4", Value: uint64(0xffff)}, + {Type: registry.QWORD, Name: "Qword5", Value: uint64(0xffffff)}, + {Type: registry.QWORD, Name: "Qword6", Value: uint64(0xffffffff)}, + {Type: registry.MULTI_SZ, Name: "MultiString1", Value: []string{"a", "b", "c"}}, + {Type: registry.MULTI_SZ, Name: "MultiString2", Value: []string{"abc", "", "cba"}}, + {Type: registry.MULTI_SZ, Name: "MultiString3", 
Value: []string{""}}, + {Type: registry.MULTI_SZ, Name: "MultiString4", Value: []string{"abcdef"}}, + {Type: registry.MULTI_SZ, Name: "MultiString5", Value: []string{"\000"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString6", Value: []string{"a\000b"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString7", Value: []string{"ab", "\000", "cd"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString8", Value: []string{"\000", "cd"}, WillFail: true}, + {Type: registry.MULTI_SZ, Name: "MultiString9", Value: []string{"ab", "\000"}, WillFail: true}, +} + +func setValues(t *testing.T, k registry.Key) { + for _, test := range ValueTests { + var err error + switch test.Type { + case registry.SZ: + err = k.SetStringValue(test.Name, test.Value.(string)) + case registry.EXPAND_SZ: + err = k.SetExpandStringValue(test.Name, test.Value.(string)) + case registry.MULTI_SZ: + err = k.SetStringsValue(test.Name, test.Value.([]string)) + case registry.BINARY: + err = k.SetBinaryValue(test.Name, test.Value.([]byte)) + case registry.DWORD: + err = k.SetDWordValue(test.Name, uint32(test.Value.(uint64))) + case registry.QWORD: + err = k.SetQWordValue(test.Name, test.Value.(uint64)) + default: + t.Fatalf("unsupported type %d for %s value", test.Type, test.Name) + } + if test.WillFail { + if err == nil { + t.Fatalf("setting %s value %q should fail, but succeeded", test.Name, test.Value) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func enumerateValues(t *testing.T, k registry.Key) { + names, err := k.ReadValueNames() + if err != nil { + t.Error(err) + return + } + haveNames := make(map[string]bool) + for _, n := range names { + haveNames[n] = false + } + for _, test := range ValueTests { + wantFound := !test.WillFail + _, haveFound := haveNames[test.Name] + if wantFound && !haveFound { + t.Errorf("value %s is not found while enumerating", test.Name) + } + if haveFound && !wantFound { + t.Errorf("value %s is found while enumerating, but expected to fail", test.Name) + } + if haveFound { + delete(haveNames, test.Name) + } + } + for n, v := range haveNames { + t.Errorf("value %s (%v) is found while enumerating, but has not been created", n, v) + } +} + +func testErrNotExist(t *testing.T, name string, err error) { + if err == nil { + t.Errorf("%s value should not exist", name) + return + } + if err != registry.ErrNotExist { + t.Errorf("reading %s value should return 'not exist' error, but got: %s", name, err) + return + } +} + +func testErrUnexpectedType(t *testing.T, test ValueTest, gottype uint32, err error) { + if err == nil { + t.Errorf("GetXValue(%q) should not succeed", test.Name) + return + } + if err != registry.ErrUnexpectedType { + t.Errorf("reading %s value should return 'unexpected key value type' error, but got: %s", test.Name, err) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetStringValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetStringValue(test.Name) + if err != nil { + t.Errorf("GetStringValue(%s) failed: %v", test.Name, err) + return + } + if got != test.Value { + t.Errorf("want %s value %q, got %q", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + if gottype == registry.EXPAND_SZ { + _, err = registry.ExpandString(got) + if err != nil { + t.Errorf("ExpandString(%s) failed: %v", got, err) + return + } + } 
+} + +func testGetIntegerValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetIntegerValue(test.Name) + if err != nil { + t.Errorf("GetIntegerValue(%s) failed: %v", test.Name, err) + return + } + if got != test.Value.(uint64) { + t.Errorf("want %s value %v, got %v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetBinaryValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetBinaryValue(test.Name) + if err != nil { + t.Errorf("GetBinaryValue(%s) failed: %v", test.Name, err) + return + } + if !bytes.Equal(got, test.Value.([]byte)) { + t.Errorf("want %s value %v, got %v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetStringsValue(t *testing.T, k registry.Key, test ValueTest) { + got, gottype, err := k.GetStringsValue(test.Name) + if err != nil { + t.Errorf("GetStringsValue(%s) failed: %v", test.Name, err) + return + } + if !equalStringSlice(got, test.Value.([]string)) { + t.Errorf("want %s value %#v, got %#v", test.Name, test.Value, got) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } +} + +func testGetValue(t *testing.T, k registry.Key, test ValueTest, size int) { + if size <= 0 { + return + } + // read data with no buffer + gotsize, gottype, err := k.GetValue(test.Name, nil) + if err != nil { + t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // read data with short buffer + gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size-1)) + if err == nil { + t.Errorf("GetValue(%s, [%d]byte) should fail, but succeeded", test.Name, size-1) + return + } + if err != registry.ErrShortBuffer { + t.Errorf("reading %s value should return 'short buffer' error, but got: %s", test.Name, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // read full data + gotsize, gottype, err = k.GetValue(test.Name, make([]byte, size)) + if err != nil { + t.Errorf("GetValue(%s, [%d]byte) failed: %v", test.Name, size, err) + return + } + if gotsize != size { + t.Errorf("want %s value size of %d, got %v", test.Name, size, gotsize) + return + } + if gottype != test.Type { + t.Errorf("want %s value type %v, got %v", test.Name, test.Type, gottype) + return + } + // check GetValue returns ErrNotExist as required + _, _, err = k.GetValue(test.Name+"_not_there", make([]byte, size)) + if err == nil { + t.Errorf("GetValue(%q) should not succeed", test.Name) + return + } + if err != registry.ErrNotExist { + t.Errorf("GetValue(%q) should return 'not exist' error, but got: %s", test.Name, err) + return + } +} + +func testValues(t *testing.T, k registry.Key) { + for _, test := range ValueTests { + switch test.Type { + case registry.SZ, registry.EXPAND_SZ: + if test.WillFail { + _, _, err := k.GetStringValue(test.Name) + testErrNotExist(t, test.Name, err) + } else { + testGetStringValue(t, k, test) 
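+				// Reading a string value through GetIntegerValue must fail with
+				// ErrUnexpectedType (verified by testErrUnexpectedType below).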
+				_, gottype, err := k.GetIntegerValue(test.Name)
+				testErrUnexpectedType(t, test, gottype, err)
+				// Size of utf16 string in bytes is not perfect,
+				// but correct for current test values.
+				// Size also includes terminating 0.
+				testGetValue(t, k, test, (len(test.Value.(string))+1)*2)
+			}
+			_, _, err := k.GetStringValue(test.Name + "_string_not_created")
+			testErrNotExist(t, test.Name+"_string_not_created", err)
+		case registry.DWORD, registry.QWORD:
+			testGetIntegerValue(t, k, test)
+			_, gottype, err := k.GetBinaryValue(test.Name)
+			testErrUnexpectedType(t, test, gottype, err)
+			_, _, err = k.GetIntegerValue(test.Name + "_int_not_created")
+			testErrNotExist(t, test.Name+"_int_not_created", err)
+			size := 8
+			if test.Type == registry.DWORD {
+				size = 4
+			}
+			testGetValue(t, k, test, size)
+		case registry.BINARY:
+			testGetBinaryValue(t, k, test)
+			_, gottype, err := k.GetStringsValue(test.Name)
+			testErrUnexpectedType(t, test, gottype, err)
+			_, _, err = k.GetBinaryValue(test.Name + "_byte_not_created")
+			testErrNotExist(t, test.Name+"_byte_not_created", err)
+			testGetValue(t, k, test, len(test.Value.([]byte)))
+		case registry.MULTI_SZ:
+			if test.WillFail {
+				_, _, err := k.GetStringsValue(test.Name)
+				testErrNotExist(t, test.Name, err)
+			} else {
+				testGetStringsValue(t, k, test)
+				_, gottype, err := k.GetStringValue(test.Name)
+				testErrUnexpectedType(t, test, gottype, err)
+				size := 0
+				for _, s := range test.Value.([]string) {
+					size += len(s) + 1 // nil terminated
+				}
+				size += 1 // extra nil at the end
+				size *= 2 // count bytes, not uint16
+				testGetValue(t, k, test, size)
+			}
+			_, _, err := k.GetStringsValue(test.Name + "_strings_not_created")
+			testErrNotExist(t, test.Name+"_strings_not_created", err)
+		default:
+			t.Errorf("unsupported type %d for %s value", test.Type, test.Name)
+			continue
+		}
+	}
+}
+
+func testStat(t *testing.T, k registry.Key) {
+	subk, _, err := registry.CreateKey(k, "subkey", registry.CREATE_SUB_KEY)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	defer subk.Close()
+
+	defer registry.DeleteKey(k, "subkey")
+
+	ki, err := k.Stat()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	if ki.SubKeyCount != 1 {
+		t.Error("key must have 1 subkey")
+	}
+	if ki.MaxSubKeyLen != 6 {
+		t.Error("key max subkey name length must be 6")
+	}
+	if ki.ValueCount != 24 {
+		t.Errorf("key must have 24 values, but is %d", ki.ValueCount)
+	}
+	if ki.MaxValueNameLen != 12 {
+		t.Errorf("key max value name length must be 12, but is %d", ki.MaxValueNameLen)
+	}
+	if ki.MaxValueLen != 38 {
+		t.Errorf("key max value length must be 38, but is %d", ki.MaxValueLen)
+	}
+}
+
+func deleteValues(t *testing.T, k registry.Key) {
+	for _, test := range ValueTests {
+		if test.WillFail {
+			continue
+		}
+		err := k.DeleteValue(test.Name)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+	}
+	names, err := k.ReadValueNames()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	if len(names) != 0 {
+		t.Errorf("some values remain after deletion: %v", names)
+	}
+}
+
+func TestValues(t *testing.T) {
+	softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer softwareK.Close()
+
+	testKName := randKeyName("TestValues_")
+
+	k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer k.Close()
+
+	if exist {
+		t.Fatalf("key %q already exists", testKName)
+	}
+
+	defer registry.DeleteKey(softwareK, testKName)
+
+	setValues(t, k)
+
+	
enumerateValues(t, k) + + testValues(t, k) + + testStat(t, k) + + deleteValues(t, k) +} + +func TestExpandString(t *testing.T) { + got, err := registry.ExpandString("%PATH%") + if err != nil { + t.Fatal(err) + } + want := os.Getenv("PATH") + if got != want { + t.Errorf("want %q string expanded, got %q", want, got) + } +} + +func TestInvalidValues(t *testing.T) { + softwareK, err := registry.OpenKey(registry.CURRENT_USER, "Software", registry.QUERY_VALUE) + if err != nil { + t.Fatal(err) + } + defer softwareK.Close() + + testKName := randKeyName("TestInvalidValues_") + + k, exist, err := registry.CreateKey(softwareK, testKName, registry.CREATE_SUB_KEY|registry.QUERY_VALUE|registry.SET_VALUE) + if err != nil { + t.Fatal(err) + } + defer k.Close() + + if exist { + t.Fatalf("key %q already exists", testKName) + } + + defer registry.DeleteKey(softwareK, testKName) + + var tests = []struct { + Type uint32 + Name string + Data []byte + }{ + {registry.DWORD, "Dword1", nil}, + {registry.DWORD, "Dword2", []byte{1, 2, 3}}, + {registry.QWORD, "Qword1", nil}, + {registry.QWORD, "Qword2", []byte{1, 2, 3}}, + {registry.QWORD, "Qword3", []byte{1, 2, 3, 4, 5, 6, 7}}, + {registry.MULTI_SZ, "MultiString1", nil}, + {registry.MULTI_SZ, "MultiString2", []byte{0}}, + {registry.MULTI_SZ, "MultiString3", []byte{'a', 'b', 0}}, + {registry.MULTI_SZ, "MultiString4", []byte{'a', 0, 0, 'b', 0}}, + {registry.MULTI_SZ, "MultiString5", []byte{'a', 0, 0}}, + } + + for _, test := range tests { + err := k.SetValue(test.Name, test.Type, test.Data) + if err != nil { + t.Fatalf("SetValue for %q failed: %v", test.Name, err) + } + } + + for _, test := range tests { + switch test.Type { + case registry.DWORD, registry.QWORD: + value, valType, err := k.GetIntegerValue(test.Name) + if err == nil { + t.Errorf("GetIntegerValue(%q) succeeded. Returns type=%d value=%v", test.Name, valType, value) + } + case registry.MULTI_SZ: + value, valType, err := k.GetStringsValue(test.Name) + if err == nil { + if len(value) != 0 { + t.Errorf("GetStringsValue(%q) succeeded. 
Returns type=%d value=%v", test.Name, valType, value) + } + } + default: + t.Errorf("unsupported type %d for %s value", test.Type, test.Name) + } + } +} + +func TestGetMUIStringValue(t *testing.T) { + var dtzi DynamicTimezoneinformation + if _, err := GetDynamicTimeZoneInformation(&dtzi); err != nil { + t.Fatal(err) + } + tzKeyName := syscall.UTF16ToString(dtzi.TimeZoneKeyName[:]) + timezoneK, err := registry.OpenKey(registry.LOCAL_MACHINE, + `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones\`+tzKeyName, registry.READ) + if err != nil { + t.Fatal(err) + } + defer timezoneK.Close() + + type testType struct { + name string + want string + } + var tests = []testType{ + {"MUI_Std", syscall.UTF16ToString(dtzi.StandardName[:])}, + } + if dtzi.DynamicDaylightTimeDisabled == 0 { + tests = append(tests, testType{"MUI_Dlt", syscall.UTF16ToString(dtzi.DaylightName[:])}) + } + + for _, test := range tests { + got, err := timezoneK.GetMUIStringValue(test.name) + if err != nil { + t.Error("GetMUIStringValue:", err) + } + + if got != test.want { + t.Errorf("GetMUIStringValue: %s: Got %q, want %q", test.name, got, test.want) + } + } +} + +type DynamicTimezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate syscall.Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate syscall.Systemtime + DaylightBias int32 + TimeZoneKeyName [128]uint16 + DynamicDaylightTimeDisabled uint8 +} + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetDynamicTimeZoneInformation = modkernel32.NewProc("GetDynamicTimeZoneInformation") +) + +func GetDynamicTimeZoneInformation(dtzi *DynamicTimezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetDynamicTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(dtzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/testing/internal/syscall/windows/registry/syscall.go b/testing/internal/syscall/windows/registry/syscall.go new file mode 100644 index 0000000..8e73091 --- /dev/null +++ b/testing/internal/syscall/windows/registry/syscall.go @@ -0,0 +1,27 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
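+
+// The //sys lines below are processed by mksyscall_windows.go (see
+// mksyscall.go in this package), which generates zsyscall_windows.go.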
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+
+//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/testing/internal/syscall/windows/registry/value.go b/testing/internal/syscall/windows/registry/value.go
new file mode 100644
index 0000000..67b1144
--- /dev/null
+++ b/testing/internal/syscall/windows/registry/value.go
@@ -0,0 +1,369 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+	"errors"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n only.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
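+//
+// A minimal usage sketch (illustrative only; the value name is an
+// assumption, not something this package defines):
+//
+//	buf := make([]byte, 64)
+//	n, valtype, err := k.GetValue("SomeValue", buf)
+//	if err == ErrShortBuffer {
+//		buf = make([]byte, n)
+//		n, valtype, err = k.GetValue("SomeValue", buf)
+//	}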
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return 0, 0, err
+	}
+	var pbuf *byte
+	if len(buf) > 0 {
+		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+	}
+	l := uint32(len(buf))
+	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+	if err != nil {
+		return int(l), valtype, err
+	}
+	return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	var t uint32
+	n := uint32(len(buf))
+	for {
+		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+		if err == nil {
+			return buf[:n], t, nil
+		}
+		if err != syscall.ERROR_MORE_DATA {
+			return nil, 0, err
+		}
+		if n <= uint32(len(buf)) {
+			return nil, 0, err
+		}
+		buf = make([]byte, n)
+	}
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return "", typ, err2
+	}
+	switch typ {
+	case SZ, EXPAND_SZ:
+	default:
+		return "", typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return "", typ, nil
+	}
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]uint16, 1024)
+	var buflen uint32
+	var pdir *uint16
+
+	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+		// Try to resolve the string value using the system directory as
+		// a DLL search path; this assumes the string value is of the form
+		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+		// This approach works with tzres.dll but may have to be revised
+		// in the future to allow callers to provide custom search paths.
+
+		var s string
+		s, err = ExpandString("%SystemRoot%\\system32\\")
+		if err != nil {
+			return "", err
+		}
+		pdir, err = syscall.UTF16PtrFromString(s)
+		if err != nil {
+			return "", err
+		}
+
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+		if buflen <= uint32(len(buf)) {
+			break // Buffer not growing, assume race; break
+		}
+		buf = make([]uint16, buflen)
+		err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	}
+
+	if err != nil {
+		return "", err
+	}
+
+	return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
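+// For example, "%SystemRoot%\system32" typically expands to
+// "C:\Windows\system32", though the result is machine dependent.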
+// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, syscall.UTF16ToString(p[from:i])) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + return *(*uint64)(unsafe.Pointer(&data[0])), QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. 
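+//
+// A short sketch (the value name is illustrative):
+//
+//	if err := k.SetDWordValue("Timeout", 30); err != nil {
+//		// handle error
+//	}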
+func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +func (k Key) ReadValueNames() ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + return names, nil +} diff --git a/testing/internal/syscall/windows/registry/zsyscall_windows.go b/testing/internal/syscall/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000..4cf674c --- /dev/null +++ b/testing/internal/syscall/windows/registry/zsyscall_windows.go @@ -0,0 +1,107 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows/sysdll" + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
+	modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
+
+	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
+	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
+	procRegDeleteValueW           = modadvapi32.NewProc("RegDeleteValueW")
+	procRegEnumValueW             = modadvapi32.NewProc("RegEnumValueW")
+	procRegLoadMUIStringW         = modadvapi32.NewProc("RegLoadMUIStringW")
+	procRegSetValueExW            = modadvapi32.NewProc("RegSetValueExW")
+	procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+	r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+	r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+	if r0 != 0 {
regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/testing/internal/syscall/windows/reparse_windows.go b/testing/internal/syscall/windows/reparse_windows.go new file mode 100644 index 0000000..241dd52 --- /dev/null +++ b/testing/internal/syscall/windows/reparse_windows.go @@ -0,0 +1,94 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +// Reparse tag values are taken from +// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/c8e77b37-3909-4fe6-a4ea-2b9d423b1ee4 +const ( + FSCTL_SET_REPARSE_POINT = 0x000900A4 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_DEDUP = 0x80000013 + IO_REPARSE_TAG_AF_UNIX = 0x80000023 + + SYMLINK_FLAG_RELATIVE = 1 +) + +// These structures are described +// in https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/ca069dad-ed16-42aa-b057-b6b207f447cc +// and https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/b41f1cbf-10df-4a47-98d4-1c52a833d913. + +type REPARSE_DATA_BUFFER struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + DUMMYUNIONNAME byte +} + +// REPARSE_DATA_BUFFER_HEADER is a common part of REPARSE_DATA_BUFFER structure. +type REPARSE_DATA_BUFFER_HEADER struct { + ReparseTag uint32 + // The size, in bytes, of the reparse data that follows + // the common portion of the REPARSE_DATA_BUFFER element. + // This value is the length of the data starting at the + // SubstituteNameOffset field. + ReparseDataLength uint16 + Reserved uint16 +} + +type SymbolicLinkReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. + PrintNameLength uint16 + // Flags specifies whether the substitute name is a full path name or + // a path name relative to the directory containing the symbolic link. + Flags uint32 + PathBuffer [1]uint16 +} + +// Path returns path stored in rb. +func (rb *SymbolicLinkReparseBuffer) Path() string { + n1 := rb.SubstituteNameOffset / 2 + n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2 + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2]) +} + +type MountPointReparseBuffer struct { + // The integer that contains the offset, in bytes, + // of the substitute name string in the PathBuffer array, + // computed as an offset from byte 0 of PathBuffer. Note that + // this offset must be divided by 2 to get the array index. + SubstituteNameOffset uint16 + // The integer that contains the length, in bytes, of the + // substitute name string. 
If this string is null-terminated, + // SubstituteNameLength does not include the Unicode null character. + SubstituteNameLength uint16 + // PrintNameOffset is similar to SubstituteNameOffset. + PrintNameOffset uint16 + // PrintNameLength is similar to SubstituteNameLength. + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +// Path returns path stored in rb. +func (rb *MountPointReparseBuffer) Path() string { + n1 := rb.SubstituteNameOffset / 2 + n2 := (rb.SubstituteNameOffset + rb.SubstituteNameLength) / 2 + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(&rb.PathBuffer[0]))[n1:n2:n2]) +} diff --git a/testing/internal/syscall/windows/security_windows.go b/testing/internal/syscall/windows/security_windows.go new file mode 100644 index 0000000..017e25a --- /dev/null +++ b/testing/internal/syscall/windows/security_windows.go @@ -0,0 +1,264 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "runtime" + "syscall" + "unsafe" +) + +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys ImpersonateLoggedOnUser(token syscall.Token) (err error) = advapi32.ImpersonateLoggedOnUser +//sys LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *syscall.Token) (err error) = advapi32.LogonUserW + +const ( + TOKEN_ADJUST_PRIVILEGES = 0x0020 + SE_PRIVILEGE_ENABLED = 0x00000002 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUID_AND_ATTRIBUTES struct { + Luid LUID + Attributes uint32 +} + +type TOKEN_PRIVILEGES struct { + PrivilegeCount uint32 + Privileges [1]LUID_AND_ATTRIBUTES +} + +//sys OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) = advapi32.OpenThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) [true] = advapi32.AdjustTokenPrivileges + +func AdjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) error { + ret, err := adjustTokenPrivileges(token, disableAllPrivileges, newstate, buflen, prevstate, returnlen) + if ret == 0 { + // AdjustTokenPrivileges call failed + return err + } + // AdjustTokenPrivileges call succeeded + if err == syscall.EINVAL { + // GetLastError returned ERROR_SUCCESS + return nil + } + return err +} + +//sys DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) = advapi32.DuplicateTokenEx +//sys SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) = advapi32.SetTokenInformation + +type SID_AND_ATTRIBUTES struct { + Sid *syscall.SID + Attributes uint32 +} + +type TOKEN_MANDATORY_LABEL struct { + Label SID_AND_ATTRIBUTES +} + +func (tml *TOKEN_MANDATORY_LABEL) Size() 
uint32 {
+	return uint32(unsafe.Sizeof(TOKEN_MANDATORY_LABEL{})) + syscall.GetLengthSid(tml.Label.Sid)
+}
+
+const SE_GROUP_INTEGRITY = 0x00000020
+
+type TokenType uint32
+
+const (
+	TokenPrimary       TokenType = 1
+	TokenImpersonation TokenType = 2
+)
+
+//sys GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) = userenv.GetProfilesDirectoryW
+
+const (
+	LG_INCLUDE_INDIRECT  = 0x1
+	MAX_PREFERRED_LENGTH = 0xFFFFFFFF
+)
+
+type LocalGroupUserInfo0 struct {
+	Name *uint16
+}
+
+const (
+	NERR_UserNotFound syscall.Errno = 2221
+	NERR_UserExists   syscall.Errno = 2224
+)
+
+const (
+	USER_PRIV_USER = 1
+)
+
+type UserInfo1 struct {
+	Name        *uint16
+	Password    *uint16
+	PasswordAge uint32
+	Priv        uint32
+	HomeDir     *uint16
+	Comment     *uint16
+	Flags       uint32
+	ScriptPath  *uint16
+}
+
+type UserInfo4 struct {
+	Name            *uint16
+	Password        *uint16
+	PasswordAge     uint32
+	Priv            uint32
+	HomeDir         *uint16
+	Comment         *uint16
+	Flags           uint32
+	ScriptPath      *uint16
+	AuthFlags       uint32
+	FullName        *uint16
+	UsrComment      *uint16
+	Parms           *uint16
+	Workstations    *uint16
+	LastLogon       uint32
+	LastLogoff      uint32
+	AcctExpires     uint32
+	MaxStorage      uint32
+	UnitsPerWeek    uint32
+	LogonHours      *byte
+	BadPwCount      uint32
+	NumLogons       uint32
+	LogonServer     *uint16
+	CountryCode     uint32
+	CodePage        uint32
+	UserSid         *syscall.SID
+	PrimaryGroupID  uint32
+	Profile         *uint16
+	HomeDirDrive    *uint16
+	PasswordExpired uint32
+}
+
+//sys NetUserAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint32) (neterr error) = netapi32.NetUserAdd
+//sys NetUserDel(serverName *uint16, userName *uint16) (neterr error) = netapi32.NetUserDel
+//sys NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) = netapi32.NetUserGetLocalGroups
+
+// GetSystemDirectory retrieves the path to the current location of the system
+// directory, which is typically, though not always, `C:\Windows\System32`.
+//
+//go:linkname GetSystemDirectory
+func GetSystemDirectory() string // Implemented in runtime package.
+
+// GetUserName retrieves the user name of the current thread
+// in the specified format.
+func GetUserName(format uint32) (string, error) {
+	n := uint32(50)
+	for {
+		b := make([]uint16, n)
+		e := syscall.GetUserNameEx(format, &b[0], &n)
+		if e == nil {
+			return syscall.UTF16ToString(b[:n]), nil
+		}
+		if e != syscall.ERROR_MORE_DATA {
+			return "", e
+		}
+		if n <= uint32(len(b)) {
+			return "", e
+		}
+	}
+}
+
+// getTokenInfo retrieves a specified type of information about an access token.
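+// It retries with a larger buffer while GetTokenInformation reports
+// ERROR_INSUFFICIENT_BUFFER, and gives up if the reported size stops
+// growing, so the loop cannot spin forever.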
+func getTokenInfo(t syscall.Token, class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := syscall.GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != syscall.ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +type TOKEN_GROUPS struct { + GroupCount uint32 + Groups [1]SID_AND_ATTRIBUTES +} + +func (g *TOKEN_GROUPS) AllGroups() []SID_AND_ATTRIBUTES { + return (*[(1 << 28) - 1]SID_AND_ATTRIBUTES)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +func GetTokenGroups(t syscall.Token) (*TOKEN_GROUPS, error) { + i, e := getTokenInfo(t, syscall.TokenGroups, 50) + if e != nil { + return nil, e + } + return (*TOKEN_GROUPS)(i), nil +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-sid_identifier_authority +type SID_IDENTIFIER_AUTHORITY struct { + Value [6]byte +} + +const ( + SID_REVISION = 1 + // https://learn.microsoft.com/en-us/windows/win32/services/localsystem-account + SECURITY_LOCAL_SYSTEM_RID = 18 + // https://learn.microsoft.com/en-us/windows/win32/services/localservice-account + SECURITY_LOCAL_SERVICE_RID = 19 + // https://learn.microsoft.com/en-us/windows/win32/services/networkservice-account + SECURITY_NETWORK_SERVICE_RID = 20 +) + +var SECURITY_NT_AUTHORITY = SID_IDENTIFIER_AUTHORITY{ + Value: [6]byte{0, 0, 0, 0, 0, 5}, +} + +//sys IsValidSid(sid *syscall.SID) (valid bool) = advapi32.IsValidSid +//sys getSidIdentifierAuthority(sid *syscall.SID) (idauth uintptr) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthority(sid *syscall.SID, subAuthorityIdx uint32) (subAuth uintptr) = advapi32.GetSidSubAuthority +//sys getSidSubAuthorityCount(sid *syscall.SID) (count uintptr) = advapi32.GetSidSubAuthorityCount + +// The following GetSid* functions are marked as //go:nocheckptr because checkptr +// instrumentation can't see that the pointer returned by the syscall is pointing +// into the sid's memory, which is normally allocated on the Go heap. Therefore, +// the checkptr instrumentation would incorrectly flag the pointer dereference +// as pointing to an invalid allocation. +// Also, use runtime.KeepAlive to ensure that the sid is not garbage collected +// before the GetSid* functions return, as the Go GC is not aware that the +// pointers returned by the syscall are pointing into the sid's memory. + +//go:nocheckptr +func GetSidIdentifierAuthority(sid *syscall.SID) SID_IDENTIFIER_AUTHORITY { + defer runtime.KeepAlive(sid) + return *(*SID_IDENTIFIER_AUTHORITY)(unsafe.Pointer(getSidIdentifierAuthority(sid))) +} + +//go:nocheckptr +func GetSidSubAuthority(sid *syscall.SID, subAuthorityIdx uint32) uint32 { + defer runtime.KeepAlive(sid) + return *(*uint32)(unsafe.Pointer(getSidSubAuthority(sid, subAuthorityIdx))) +} + +//go:nocheckptr +func GetSidSubAuthorityCount(sid *syscall.SID) uint8 { + defer runtime.KeepAlive(sid) + return *(*uint8)(unsafe.Pointer(getSidSubAuthorityCount(sid))) +} diff --git a/testing/internal/syscall/windows/string_windows.go b/testing/internal/syscall/windows/string_windows.go new file mode 100644 index 0000000..eb6893d --- /dev/null +++ b/testing/internal/syscall/windows/string_windows.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +import "syscall" + +// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. +type NTUnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + s16, err := syscall.UTF16FromString(s) + if err != nil { + return nil, err + } + n := uint16(len(s16) * 2) + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdmsec/nf-wdmsec-wdmlibrtlinitunicodestringex + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NUL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil +} diff --git a/testing/internal/syscall/windows/symlink_windows.go b/testing/internal/syscall/windows/symlink_windows.go new file mode 100644 index 0000000..b912460 --- /dev/null +++ b/testing/internal/syscall/windows/symlink_windows.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + ERROR_INVALID_PARAMETER syscall.Errno = 87 + + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + + // symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972) + SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2 + + // FileInformationClass values + FileBasicInfo = 0 // FILE_BASIC_INFO + FileStandardInfo = 1 // FILE_STANDARD_INFO + FileNameInfo = 2 // FILE_NAME_INFO + FileStreamInfo = 7 // FILE_STREAM_INFO + FileCompressionInfo = 8 // FILE_COMPRESSION_INFO + FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO + FileIdBothDirectoryInfo = 0xa // FILE_ID_BOTH_DIR_INFO + FileIdBothDirectoryRestartInfo = 0xb // FILE_ID_BOTH_DIR_INFO + FileRemoteProtocolInfo = 0xd // FILE_REMOTE_PROTOCOL_INFO + FileFullDirectoryInfo = 0xe // FILE_FULL_DIR_INFO + FileFullDirectoryRestartInfo = 0xf // FILE_FULL_DIR_INFO + FileStorageInfo = 0x10 // FILE_STORAGE_INFO + FileAlignmentInfo = 0x11 // FILE_ALIGNMENT_INFO + FileIdInfo = 0x12 // FILE_ID_INFO + FileIdExtdDirectoryInfo = 0x13 // FILE_ID_EXTD_DIR_INFO + FileIdExtdDirectoryRestartInfo = 0x14 // FILE_ID_EXTD_DIR_INFO +) + +type FILE_ATTRIBUTE_TAG_INFO struct { + FileAttributes uint32 + ReparseTag uint32 +} + +//sys GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) diff --git a/testing/internal/syscall/windows/syscall_windows.go b/testing/internal/syscall/windows/syscall_windows.go new file mode 100644 index 0000000..c848f92 --- /dev/null +++ b/testing/internal/syscall/windows/syscall_windows.go @@ -0,0 +1,537 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "syscall" + "unsafe" +) + +// CanUseLongPaths is true when the OS supports opting into +// proper long path handling without the need for fixups. +// +//go:linkname CanUseLongPaths +var CanUseLongPaths bool + +// UTF16PtrToString is like UTF16ToString, but takes *uint16 +// as a parameter instead of []uint16. 
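+//
+// A small sketch of the intended use (the pointer would normally come
+// from a Windows API call; the variable here is only illustrative):
+//
+//	var p *uint16 // e.g. returned by a syscall
+//	s := UTF16PtrToString(p) // s is "" when p is nil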
+func UTF16PtrToString(p *uint16) string { + if p == nil { + return "" + } + end := unsafe.Pointer(p) + n := 0 + for *(*uint16)(end) != 0 { + end = unsafe.Pointer(uintptr(end) + unsafe.Sizeof(*p)) + n++ + } + return syscall.UTF16ToString(unsafe.Slice(p, n)) +} + +const ( + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_NO_TOKEN syscall.Errno = 1008 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 + ERROR_CANT_ACCESS_FILE syscall.Errno = 1920 +) + +const ( + GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + GAA_FLAG_INCLUDE_GATEWAYS = 0x0080 +) + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + /* more fields might be present here. */ +} + +type SecurityAttributes struct { + Length uint16 + SecurityDescriptor uintptr + InheritHandle bool +} + +type FILE_BASIC_INFO struct { + CreationTime int64 + LastAccessTime int64 + LastWriteTime int64 + ChangedTime int64 + FileAttributes uint32 + + // Pad out to 8-byte alignment. + // + // Without this padding, TestChmod fails due to an argument validation error + // in SetFileInformationByHandle on windows/386. 
+ // + // https://learn.microsoft.com/en-us/cpp/build/reference/zp-struct-member-alignment?view=msvc-170 + // says that “The C/C++ headers in the Windows SDK assume the platform's + // default alignment is used.” What we see here is padding rather than + // alignment, but maybe it is related. + _ uint32 +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW +//sys SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle +//sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery +//sys GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPath2W + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 +) + +const MAX_MODULE_NAME32 = 255 + +type ModuleEntry32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr uintptr + ModBaseSize uint32 + ModuleHandle syscall.Handle + Module [MAX_MODULE_NAME32 + 1]uint16 + ExePath [syscall.MAX_PATH]uint16 +} + +const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{}) + +//sys Module32First(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW +//sys Module32Next(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW + +const ( + WSA_FLAG_OVERLAPPED = 0x01 + WSA_FLAG_NO_HANDLE_INHERIT = 0x80 + + WSAEINVAL syscall.Errno = 10022 + WSAEMSGSIZE syscall.Errno = 10040 + WSAEAFNOSUPPORT syscall.Errno = 10047 + + MSG_PEEK = 0x2 + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + + socket_error = uintptr(^uint32(0)) +) + +var WSAID_WSASENDMSG = syscall.GUID{ + Data1: 0xa441e712, + Data2: 0x754f, + Data3: 0x43ca, + Data4: [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = syscall.GUID{ + Data1: 0xf689d7c8, + Data2: 0x6f1f, + Data3: 0x436b, + Data4: [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +type WSAMsg struct { + Name syscall.Pointer + Namelen int32 + Buffers *syscall.WSABuf + BufferCount uint32 + Control syscall.WSABuf + Flags uint32 +} + +//sys WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = ws2_32.WSASocketW +//sys WSAGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s syscall.Handle + s, sendRecvMsgFunc.err = syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + 
return + } + defer syscall.CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = syscall.WSAIoctl(s, + syscall.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd syscall.Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd syscall.Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *syscall.Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 + + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +func Rename(oldpath, newpath string) error { + from, err := syscall.UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := syscall.UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +//sys LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.LockFileEx +//sys UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.UnlockFileEx + +const ( + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 +) + +const MB_ERR_INVALID_CHARS = 8 + +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys GetConsoleCP() (ccp uint32) = kernel32.GetConsoleCP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys GetCurrentThread() (pseudoHandle syscall.Handle, err error) = kernel32.GetCurrentThread + +// Constants from lmshare.h +const ( + STYPE_DISKTREE = 0x00 + STYPE_TEMPORARY = 0x40000000 +) + 
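+// SHARE_INFO_2 mirrors the Windows SHARE_INFO_2 structure used with
+// NetShareAdd below. A sketch of filling it in (all names are
+// illustrative assumptions):
+//
+//	name, _ := syscall.UTF16PtrFromString("goshare")
+//	path, _ := syscall.UTF16PtrFromString(`C:\share`)
+//	si := SHARE_INFO_2{Netname: name, Type: STYPE_DISKTREE, Path: path}
+//	err := NetShareAdd(nil, 2, (*byte)(unsafe.Pointer(&si)), nil)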
+type SHARE_INFO_2 struct { + Netname *uint16 + Type uint32 + Remark *uint16 + Permissions uint32 + MaxUses uint32 + CurrentUses uint32 + Path *uint16 + Passwd *uint16 +} + +//sys NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) = netapi32.NetShareAdd +//sys NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) = netapi32.NetShareDel + +const ( + FILE_NAME_NORMALIZED = 0x0 + FILE_NAME_OPENED = 0x8 + + VOLUME_NAME_DOS = 0x0 + VOLUME_NAME_GUID = 0x1 + VOLUME_NAME_NONE = 0x4 + VOLUME_NAME_NT = 0x2 +) + +//sys GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW + +func ErrorLoadingGetTempPath2() error { + return procGetTempPath2W.Find() +} + +//sys CreateEnvironmentBlock(block **uint16, token syscall.Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock +//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) = kernel32.CreateEventW + +//sys ProcessPrng(buf []byte) (err error) = bcryptprimitives.ProcessPrng + +type FILE_ID_BOTH_DIR_INFO struct { + NextEntryOffset uint32 + FileIndex uint32 + CreationTime syscall.Filetime + LastAccessTime syscall.Filetime + LastWriteTime syscall.Filetime + ChangeTime syscall.Filetime + EndOfFile uint64 + AllocationSize uint64 + FileAttributes uint32 + FileNameLength uint32 + EaSize uint32 + ShortNameLength uint32 + ShortName [12]uint16 + FileID uint64 + FileName [1]uint16 +} + +type FILE_FULL_DIR_INFO struct { + NextEntryOffset uint32 + FileIndex uint32 + CreationTime syscall.Filetime + LastAccessTime syscall.Filetime + LastWriteTime syscall.Filetime + ChangeTime syscall.Filetime + EndOfFile uint64 + AllocationSize uint64 + FileAttributes uint32 + FileNameLength uint32 + EaSize uint32 + FileName [1]uint16 +} + +//sys GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW + +//sys RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) = kernel32.RtlLookupFunctionEntry +//sys RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) = kernel32.RtlVirtualUnwind + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +const ( + SERVICE_RUNNING = 4 + SERVICE_QUERY_STATUS = 4 +) + +//sys OpenService(mgr syscall.Handle, serviceName *uint16, access uint32) (handle syscall.Handle, err error) = advapi32.OpenServiceW +//sys QueryServiceStatus(hService syscall.Handle, lpServiceStatus *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle syscall.Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +func FinalPath(h syscall.Handle, flags uint32) (string, error) { 
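+	// Start with a modest buffer and retry with the exact size the API
+	// reports until the path fits.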
+	buf := make([]uint16, 100)
+	for {
+		n, err := GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), flags)
+		if err != nil {
+			return "", err
+		}
+		if n < uint32(len(buf)) {
+			break
+		}
+		buf = make([]uint16, n)
+	}
+	return syscall.UTF16ToString(buf), nil
+}
+
+// QueryPerformanceCounter retrieves the current value of the performance counter.
+//
+//go:linkname QueryPerformanceCounter
+func QueryPerformanceCounter() int64 // Implemented in runtime package.
+
+// QueryPerformanceFrequency retrieves the frequency of the performance counter.
+// The returned value is represented as counts per second.
+//
+//go:linkname QueryPerformanceFrequency
+func QueryPerformanceFrequency() int64 // Implemented in runtime package.
+
+//sys GetModuleHandle(modulename *uint16) (handle syscall.Handle, err error) = kernel32.GetModuleHandleW
+
+// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and
+// other native functions.
+type NTStatus uint32
+
+func (s NTStatus) Errno() syscall.Errno {
+	return rtlNtStatusToDosErrorNoTeb(s)
+}
+
+func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) }
+
+func (s NTStatus) Error() string {
+	return s.Errno().Error()
+}
+
+// x/sys/windows/mkerrors.bash can generate a complete list of NTStatus codes.
+//
+// At the moment, we only need a couple, so just put them here manually.
+// If this list starts getting long, we should consider generating the full set.
+const (
+	STATUS_FILE_IS_A_DIRECTORY       NTStatus = 0xC00000BA
+	STATUS_DIRECTORY_NOT_EMPTY       NTStatus = 0xC0000101
+	STATUS_NOT_A_DIRECTORY           NTStatus = 0xC0000103
+	STATUS_CANNOT_DELETE             NTStatus = 0xC0000121
+	STATUS_REPARSE_POINT_ENCOUNTERED NTStatus = 0xC000050B
+)
+
+// NT Native APIs
+//sys NtCreateFile(handle *syscall.Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile
+//sys NtOpenFile(handle *syscall.Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, options uint32) (ntstatus error) = ntdll.NtOpenFile
+//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys NtSetInformationFile(handle syscall.Handle, iosb *IO_STATUS_BLOCK, inBuffer uintptr, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile
diff --git a/testing/internal/syscall/windows/sysdll/sysdll.go b/testing/internal/syscall/windows/sysdll/sysdll.go
new file mode 100644
index 0000000..e79fd19
--- /dev/null
+++ b/testing/internal/syscall/windows/sysdll/sysdll.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package sysdll is an internal leaf package that records and reports
+// which Windows DLL names are used by Go itself. These DLLs are then
+// only loaded from the System32 directory. See Issue 14959.
+package sysdll
+
+// IsSystemDLL reports whether the named dll key (a base name, like
+// "foo.dll") is a system DLL which should only be loaded from the
+// Windows SYSTEM32 directory.
+//
+// Filenames are case sensitive, but that doesn't matter because
+// the case registered with Add is also the same case used with
+// LoadDLL later.
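+//
+// For example, generated code loads DLLs via
+// syscall.NewLazyDLL(sysdll.Add("advapi32.dll")), so the name is recorded
+// here before it is ever used to load the DLL.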
+// +// It has no associated mutex and should only be mutated serially +// (currently: during init), and not concurrent with DLL loading. +var IsSystemDLL = map[string]bool{} + +// Add notes that dll is a system32 DLL which should only be loaded +// from the Windows SYSTEM32 directory. It returns its argument back, +// for ease of use in generated code. +func Add(dll string) string { + IsSystemDLL[dll] = true + return dll +} diff --git a/testing/internal/syscall/windows/types_windows.go b/testing/internal/syscall/windows/types_windows.go new file mode 100644 index 0000000..6ae37af --- /dev/null +++ b/testing/internal/syscall/windows/types_windows.go @@ -0,0 +1,218 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" +) + +// Socket related. +const ( + TCP_KEEPIDLE = 0x03 + TCP_KEEPCNT = 0x10 + TCP_KEEPINTVL = 0x11 +) + +const ( + FILE_READ_DATA = 0x00000001 + FILE_READ_ATTRIBUTES = 0x00000080 + FILE_READ_EA = 0x00000008 + FILE_WRITE_DATA = 0x00000002 + FILE_WRITE_ATTRIBUTES = 0x00000100 + FILE_WRITE_EA = 0x00000010 + FILE_APPEND_DATA = 0x00000004 + FILE_EXECUTE = 0x00000020 + + FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE + FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE + FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_TRAVERSE = 0x00000020 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 + + INVALID_FILE_ATTRIBUTES = 0xffffffff +) + +// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_acl +type ACL struct { + AclRevision byte + Sbz1 byte + AclSize uint16 + AceCount uint16 + Sbz2 uint16 +} + +// 
https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_io_status_block +type IO_STATUS_BLOCK struct { + Status NTStatus + Information uintptr +} + +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes +type OBJECT_ATTRIBUTES struct { + Length uint32 + RootDirectory syscall.Handle + ObjectName *NTUnicodeString + Attributes uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + SecurityQoS *SECURITY_QUALITY_OF_SERVICE +} + +// init sets o's RootDirectory, ObjectName, and Length. +func (o *OBJECT_ATTRIBUTES) init(root syscall.Handle, name string) error { + if name == "." { + name = "" + } + objectName, err := NewNTUnicodeString(name) + if err != nil { + return err + } + o.ObjectName = objectName + if root != syscall.InvalidHandle { + o.RootDirectory = root + } + o.Length = uint32(unsafe.Sizeof(*o)) + return nil +} + +// Values for the Attributes member of OBJECT_ATTRIBUTES. +const ( + OBJ_INHERIT = 0x00000002 + OBJ_PERMANENT = 0x00000010 + OBJ_EXCLUSIVE = 0x00000020 + OBJ_CASE_INSENSITIVE = 0x00000040 + OBJ_OPENIF = 0x00000080 + OBJ_OPENLINK = 0x00000100 + OBJ_KERNEL_HANDLE = 0x00000200 + OBJ_FORCE_ACCESS_CHECK = 0x00000400 + OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800 + OBJ_DONT_REPARSE = 0x00001000 + OBJ_VALID_ATTRIBUTES = 0x00001FF2 +) + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntifs/ns-ntifs-_security_descriptor +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *syscall.SID + group *syscall.SID + sacl *ACL + dacl *ACL +} + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ifs/security-descriptor-control +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_quality_of_service +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 // type SECURITY_IMPERSONATION_LEVEL + ContextTrackingMode byte // type SECURITY_CONTEXT_TRACKING_MODE + EffectiveOnly byte +} + +const ( + // CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_SUPERSEDE = 0x00000000 + FILE_OPEN = 0x00000001 + FILE_CREATE = 0x00000002 + FILE_OPEN_IF = 0x00000003 + FILE_OVERWRITE = 0x00000004 + FILE_OVERWRITE_IF = 0x00000005 + FILE_MAXIMUM_DISPOSITION = 0x00000005 + + // CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile. 
+ FILE_DIRECTORY_FILE = 0x00000001 + FILE_WRITE_THROUGH = 0x00000002 + FILE_SEQUENTIAL_ONLY = 0x00000004 + FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008 + FILE_SYNCHRONOUS_IO_ALERT = 0x00000010 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_NON_DIRECTORY_FILE = 0x00000040 + FILE_CREATE_TREE_CONNECTION = 0x00000080 + FILE_COMPLETE_IF_OPLOCKED = 0x00000100 + FILE_NO_EA_KNOWLEDGE = 0x00000200 + FILE_OPEN_REMOTE_INSTANCE = 0x00000400 + FILE_RANDOM_ACCESS = 0x00000800 + FILE_DELETE_ON_CLOSE = 0x00001000 + FILE_OPEN_BY_FILE_ID = 0x00002000 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_NO_COMPRESSION = 0x00008000 + FILE_OPEN_REQUIRING_OPLOCK = 0x00010000 + FILE_DISALLOW_EXCLUSIVE = 0x00020000 + FILE_RESERVE_OPFILTER = 0x00100000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + FILE_OPEN_NO_RECALL = 0x00400000 + FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 +) + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information +type FILE_DISPOSITION_INFORMATION struct { + DeleteFile bool +} + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information_ex +type FILE_DISPOSITION_INFORMATION_EX struct { + Flags uint32 +} + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_file_disposition_information_ex +const ( + FILE_DISPOSITION_DO_NOT_DELETE = 0x00000000 + FILE_DISPOSITION_DELETE = 0x00000001 + FILE_DISPOSITION_POSIX_SEMANTICS = 0x00000002 + FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004 + FILE_DISPOSITION_ON_CLOSE = 0x00000008 + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010 +) diff --git a/testing/internal/syscall/windows/version_windows.go b/testing/internal/syscall/windows/version_windows.go new file mode 100644 index 0000000..cb5f6ba --- /dev/null +++ b/testing/internal/syscall/windows/version_windows.go @@ -0,0 +1,113 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "errors" + "sync" + "syscall" + "unsafe" +) + +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_osversioninfow +type _OSVERSIONINFOW struct { + osVersionInfoSize uint32 + majorVersion uint32 + minorVersion uint32 + buildNumber uint32 + platformId uint32 + csdVersion [128]uint16 +} + +// According to documentation, RtlGetVersion function always succeeds. +//sys rtlGetVersion(info *_OSVERSIONINFOW) = ntdll.RtlGetVersion + +// Version retrieves the major, minor, and build version numbers +// of the current Windows OS from the RtlGetVersion API. +func Version() (major, minor, build uint32) { + info := _OSVERSIONINFOW{} + info.osVersionInfoSize = uint32(unsafe.Sizeof(info)) + rtlGetVersion(&info) + return info.majorVersion, info.minorVersion, info.buildNumber +} + +var ( + supportTCPKeepAliveIdle bool + supportTCPKeepAliveInterval bool + supportTCPKeepAliveCount bool +) + +var initTCPKeepAlive = sync.OnceFunc(func() { + s, err := WSASocket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP, nil, 0, WSA_FLAG_NO_HANDLE_INHERIT) + if err != nil { + // Fallback to checking the Windows version. 
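+		// Sketch of the version gates used below: TCP_KEEPIDLE and
+		// TCP_KEEPINTVL appeared in Windows 10.0.16299, and TCP_KEEPCNT in
+		// 10.0.15063, so when no probe socket can be created the build
+		// number alone decides support.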
+		major, _, build := Version()
+		supportTCPKeepAliveIdle = major >= 10 && build >= 16299
+		supportTCPKeepAliveInterval = major >= 10 && build >= 16299
+		supportTCPKeepAliveCount = major >= 10 && build >= 15063
+		return
+	}
+	defer syscall.Closesocket(s)
+	var optSupported = func(opt int) bool {
+		err := syscall.SetsockoptInt(s, syscall.IPPROTO_TCP, opt, 1)
+		return !errors.Is(err, syscall.WSAENOPROTOOPT)
+	}
+	supportTCPKeepAliveIdle = optSupported(TCP_KEEPIDLE)
+	supportTCPKeepAliveInterval = optSupported(TCP_KEEPINTVL)
+	supportTCPKeepAliveCount = optSupported(TCP_KEEPCNT)
+})
+
+// SupportTCPKeepAliveIdle indicates whether TCP_KEEPIDLE is supported.
+// The minimal requirement is Windows 10.0.16299.
+func SupportTCPKeepAliveIdle() bool {
+	initTCPKeepAlive()
+	return supportTCPKeepAliveIdle
+}
+
+// SupportTCPKeepAliveInterval indicates whether TCP_KEEPINTVL is supported.
+// The minimal requirement is Windows 10.0.16299.
+func SupportTCPKeepAliveInterval() bool {
+	initTCPKeepAlive()
+	return supportTCPKeepAliveInterval
+}
+
+// SupportTCPKeepAliveCount indicates whether TCP_KEEPCNT is supported.
+// The minimal requirement is Windows 10.0.15063.
+func SupportTCPKeepAliveCount() bool {
+	initTCPKeepAlive()
+	return supportTCPKeepAliveCount
+}
+
+// SupportTCPInitialRTONoSYNRetransmissions indicates whether the current
+// Windows version supports the TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS option.
+// The minimal requirement is Windows 10.0.16299.
+var SupportTCPInitialRTONoSYNRetransmissions = sync.OnceValue(func() bool {
+	major, _, build := Version()
+	return major >= 10 && build >= 16299
+})
+
+// SupportUnixSocket indicates whether the current Windows version supports
+// Unix Domain Sockets.
+// The minimal requirement is Windows 10.0.17063.
+var SupportUnixSocket = sync.OnceValue(func() bool {
+	var size uint32
+	// First call to get the required buffer size in bytes.
+	// Ignore the error, it will always fail.
+	_, _ = syscall.WSAEnumProtocols(nil, nil, &size)
+	n := int32(size) / int32(unsafe.Sizeof(syscall.WSAProtocolInfo{}))
+	// Second call to get the actual protocols.
+	buf := make([]syscall.WSAProtocolInfo, n)
+	n, err := syscall.WSAEnumProtocols(nil, &buf[0], &size)
+	if err != nil {
+		return false
+	}
+	for i := int32(0); i < n; i++ {
+		if buf[i].AddressFamily == syscall.AF_UNIX {
+			return true
+		}
+	}
+	return false
+})
diff --git a/testing/internal/syscall/windows/version_windows_test.go b/testing/internal/syscall/windows/version_windows_test.go
new file mode 100644
index 0000000..655f4d2
--- /dev/null
+++ b/testing/internal/syscall/windows/version_windows_test.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows_test
+
+import (
+	"errors"
+	"syscall"
+	"testing"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows"
+)
+
+func TestSupportUnixSocket(t *testing.T) {
+	var d syscall.WSAData
+	if err := syscall.WSAStartup(uint32(0x202), &d); err != nil {
+		t.Fatal(err)
+	}
+	defer syscall.WSACleanup()
+
+	// Test that SupportUnixSocket returns true if WSASocket succeeds with AF_UNIX.
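+	// The direct WSASocket probe below is an independent cross-check of the
+	// protocol-enumeration approach used by SupportUnixSocket: both should
+	// agree on whether an AF_UNIX socket can actually be created.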
+	got := windows.SupportUnixSocket()
+	s, err := windows.WSASocket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0, nil, 0, windows.WSA_FLAG_NO_HANDLE_INHERIT)
+	if err == nil {
+		syscall.Closesocket(s)
+	}
+	want := !errors.Is(err, windows.WSAEAFNOSUPPORT) && !errors.Is(err, windows.WSAEINVAL)
+	if want != got {
+		t.Errorf("SupportUnixSocket = %v; want %v", got, want)
+	}
+}
diff --git a/testing/internal/syscall/windows/zsyscall_windows.go b/testing/internal/syscall/windows/zsyscall_windows.go
new file mode 100644
index 0000000..00a5356
--- /dev/null
+++ b/testing/internal/syscall/windows/zsyscall_windows.go
@@ -0,0 +1,564 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package windows
+
+import (
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows/sysdll"
+	"syscall"
+	"unsafe"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modadvapi32         = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
+	modbcryptprimitives = syscall.NewLazyDLL(sysdll.Add("bcryptprimitives.dll"))
+	modiphlpapi         = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll"))
+	modkernel32         = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
+	modnetapi32         = syscall.NewLazyDLL(sysdll.Add("netapi32.dll"))
+	modntdll            = syscall.NewLazyDLL(sysdll.Add("ntdll.dll"))
+	modpsapi            = syscall.NewLazyDLL(sysdll.Add("psapi.dll"))
+	moduserenv          = syscall.NewLazyDLL(sysdll.Add("userenv.dll"))
+	modws2_32           = syscall.NewLazyDLL(sysdll.Add("ws2_32.dll"))
+
+	procAdjustTokenPrivileges             = modadvapi32.NewProc("AdjustTokenPrivileges")
+	procDuplicateTokenEx                  = modadvapi32.NewProc("DuplicateTokenEx")
+	procGetSidIdentifierAuthority         = modadvapi32.NewProc("GetSidIdentifierAuthority")
+	procGetSidSubAuthority                = modadvapi32.NewProc("GetSidSubAuthority")
+	procGetSidSubAuthorityCount           = modadvapi32.NewProc("GetSidSubAuthorityCount")
+	procImpersonateLoggedOnUser           = modadvapi32.NewProc("ImpersonateLoggedOnUser")
+	procImpersonateSelf                   = modadvapi32.NewProc("ImpersonateSelf")
+	procIsValidSid                        = modadvapi32.NewProc("IsValidSid")
+	procLogonUserW                        = modadvapi32.NewProc("LogonUserW")
+	procLookupPrivilegeValueW             = modadvapi32.NewProc("LookupPrivilegeValueW")
+	procOpenSCManagerW                    = modadvapi32.NewProc("OpenSCManagerW")
+	procOpenServiceW                      = modadvapi32.NewProc("OpenServiceW")
+	procOpenThreadToken                   = modadvapi32.NewProc("OpenThreadToken")
+	procQueryServiceStatus                = modadvapi32.NewProc("QueryServiceStatus")
+	procRevertToSelf                      = modadvapi32.NewProc("RevertToSelf")
+	procSetTokenInformation               = modadvapi32.NewProc("SetTokenInformation")
+	procProcessPrng                       = modbcryptprimitives.NewProc("ProcessPrng")
+	procGetAdaptersAddresses              = modiphlpapi.NewProc("GetAdaptersAddresses")
+	procCreateEventW                      = modkernel32.NewProc("CreateEventW")
+	procGetACP                            = modkernel32.NewProc("GetACP")
+	procGetComputerNameExW                = modkernel32.NewProc("GetComputerNameExW")
+	procGetConsoleCP                      = modkernel32.NewProc("GetConsoleCP")
+	procGetCurrentThread                  = modkernel32.NewProc("GetCurrentThread")
+	
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleW = modkernel32.NewProc("GetModuleHandleW") + procGetTempPath2W = modkernel32.NewProc("GetTempPath2W") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procModule32FirstW = modkernel32.NewProc("Module32FirstW") + procModule32NextW = modkernel32.NewProc("Module32NextW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procRtlLookupFunctionEntry = modkernel32.NewProc("RtlLookupFunctionEntry") + procRtlVirtualUnwind = modkernel32.NewProc("RtlVirtualUnwind") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procVirtualQuery = modkernel32.NewProc("VirtualQuery") + procNetShareAdd = modnetapi32.NewProc("NetShareAdd") + procNetShareDel = modnetapi32.NewProc("NetShareDel") + procNetUserAdd = modnetapi32.NewProc("NetUserAdd") + procNetUserDel = modnetapi32.NewProc("NetUserDel") + procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtOpenFile = modntdll.NewProc("NtOpenFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procWSASocketW = modws2_32.NewProc("WSASocketW") +) + +func adjustTokenPrivileges(token syscall.Token, disableAllPrivileges bool, newstate *TOKEN_PRIVILEGES, buflen uint32, prevstate *TOKEN_PRIVILEGES, returnlen *uint32) (ret uint32, err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + ret = uint32(r0) + if true { + err = errnoErr(e1) + } + return +} + +func DuplicateTokenEx(hExistingToken syscall.Token, dwDesiredAccess uint32, lpTokenAttributes *syscall.SecurityAttributes, impersonationLevel uint32, tokenType TokenType, phNewToken *syscall.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(hExistingToken), uintptr(dwDesiredAccess), uintptr(unsafe.Pointer(lpTokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(phNewToken))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSidIdentifierAuthority(sid *syscall.SID) (idauth uintptr) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + idauth = uintptr(r0) + return +} + +func getSidSubAuthority(sid *syscall.SID, subAuthorityIdx 
uint32) (subAuth uintptr) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(subAuthorityIdx), 0) + subAuth = uintptr(r0) + return +} + +func getSidSubAuthorityCount(sid *syscall.SID) (count uintptr) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = uintptr(r0) + return +} + +func ImpersonateLoggedOnUser(token syscall.Token) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func IsValidSid(sid *syscall.SID) (valid bool) { + r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + valid = r0 != 0 + return +} + +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *syscall.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = syscall.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenService(mgr syscall.Handle, serviceName *uint16, access uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = syscall.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenThreadToken(h syscall.Handle, access uint32, openasself bool, token *syscall.Token) (err error) { + var _p0 uint32 + if openasself { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(h), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceStatus(hService syscall.Handle, lpServiceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(hService), uintptr(unsafe.Pointer(lpServiceStatus)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation uintptr, tokenInformationLength uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 
0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ProcessPrng(buf []byte) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall(procProcessPrng.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = syscall.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nameformat), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetConsoleCP() (ccp uint32) { + r0, _, _ := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + ccp = uint32(r0) + return +} + +func GetCurrentThread() (pseudoHandle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + pseudoHandle = syscall.Handle(r0) + if pseudoHandle == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFinalPathNameByHandle(file syscall.Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(fn)), uintptr(len)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleHandle(modulename *uint16) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleHandleW.Addr(), 1, uintptr(unsafe.Pointer(modulename)), 0, 0) + handle = syscall.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPath2W.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeInformationByHandle(file syscall.Handle, volumeNameBuffer *uint16, volumeNameSize uint32, 
volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Module32First(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Module32Next(snapshot syscall.Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) + } + return +} + +func RtlLookupFunctionEntry(pc uintptr, baseAddress *uintptr, table *byte) (ret uintptr) { + r0, _, _ := syscall.Syscall(procRtlLookupFunctionEntry.Addr(), 3, uintptr(pc), uintptr(unsafe.Pointer(baseAddress)), uintptr(unsafe.Pointer(table))) + ret = uintptr(r0) + return +} + +func RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry uintptr, ctxt uintptr, data *uintptr, frame *uintptr, ctxptrs *byte) (ret uintptr) { + r0, _, _ := syscall.Syscall9(procRtlVirtualUnwind.Addr(), 8, uintptr(handlerType), uintptr(baseAddress), uintptr(pc), uintptr(entry), uintptr(ctxt), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(frame)), uintptr(unsafe.Pointer(ctxptrs)), 0) + ret = uintptr(r0) + return +} + +func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(fileInformationClass), uintptr(buf), uintptr(bufsize), 
0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func NetShareAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint16) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetShareAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetShareDel(serverName *uint16, netName *uint16, reserved uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetShareDel.Addr(), 3, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(netName)), uintptr(reserved)) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetUserAdd(serverName *uint16, level uint32, buf *byte, parmErr *uint32) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parmErr)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetUserDel(serverName *uint16, userName *uint16) (neterr error) { + r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetUserGetLocalGroups(serverName *uint16, userName *uint16, level uint32, flags uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserGetLocalGroups.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(flags), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *syscall.Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtOpenFile(handle *syscall.Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, options uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtOpenFile.Addr(), 6, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(options)) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func 
NtSetInformationFile(handle syscall.Handle, iosb *IO_STATUS_BLOCK, inBuffer uintptr, inBufferLen uint32, class uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(inBuffer), uintptr(inBufferLen), uintptr(class), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func rtlGetVersion(info *_OSVERSIONINFOW) { + syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + return +} + +func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + ret = syscall.Errno(r0) + return +} + +func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COUNTERS, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(memCounters)), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateEnvironmentBlock(block **uint16, token syscall.Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProfilesDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WSAGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags)) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} diff --git a/testing/internal/sysinfo/cpuinfo_bsd.go b/testing/internal/sysinfo/cpuinfo_bsd.go new file mode 100644 index 0000000..4396a63 --- /dev/null +++ b/testing/internal/sysinfo/cpuinfo_bsd.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || freebsd || netbsd || openbsd + +package sysinfo + +import "syscall" + +func osCPUInfoName() string { + cpu, _ := syscall.Sysctl("machdep.cpu.brand_string") + return cpu +} diff --git a/testing/internal/sysinfo/cpuinfo_linux.go b/testing/internal/sysinfo/cpuinfo_linux.go new file mode 100644 index 0000000..ae92c51 --- /dev/null +++ b/testing/internal/sysinfo/cpuinfo_linux.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sysinfo
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"strings"
+)
+
+func readLinuxProcCPUInfo(buf []byte) error {
+	f, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.ReadFull(f, buf)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	return nil
+}
+
+func osCPUInfoName() string {
+	modelName := ""
+	cpuMHz := ""
+
+	// The 512-byte buffer is enough to hold the contents of CPU0
+	buf := make([]byte, 512)
+	err := readLinuxProcCPUInfo(buf)
+	if err != nil {
+		return ""
+	}
+
+	scanner := bufio.NewScanner(bytes.NewReader(buf))
+	for scanner.Scan() {
+		key, value, found := strings.Cut(scanner.Text(), ": ")
+		if !found {
+			continue
+		}
+		switch strings.TrimSpace(key) {
+		case "Model Name", "model name":
+			modelName = value
+		case "CPU MHz", "cpu MHz":
+			cpuMHz = value
+		}
+	}
+
+	if modelName == "" {
+		return ""
+	}
+
+	if cpuMHz == "" {
+		return modelName
+	}
+
+	// The modelName field already contains the frequency information,
+	// so the cpuMHz field information is not needed.
+	// modelName field example:
+	//	Intel(R) Core(TM) i7-10700 CPU @ 2.90GHz
+	f := [...]string{"GHz", "MHz"}
+	for _, v := range f {
+		if strings.Contains(modelName, v) {
+			return modelName
+		}
+	}
+
+	return modelName + " @ " + cpuMHz + "MHz"
+}
diff --git a/testing/internal/sysinfo/cpuinfo_stub.go b/testing/internal/sysinfo/cpuinfo_stub.go
new file mode 100644
index 0000000..2ac7ffa
--- /dev/null
+++ b/testing/internal/sysinfo/cpuinfo_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(darwin || freebsd || linux || netbsd || openbsd)
+
+package sysinfo
+
+func osCPUInfoName() string {
+	return ""
+}
diff --git a/testing/internal/sysinfo/export_test.go b/testing/internal/sysinfo/export_test.go
new file mode 100644
index 0000000..809a683
--- /dev/null
+++ b/testing/internal/sysinfo/export_test.go
@@ -0,0 +1,7 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sysinfo
+
+var XosCPUInfoName = osCPUInfoName
diff --git a/testing/internal/sysinfo/sysinfo.go b/testing/internal/sysinfo/sysinfo.go
new file mode 100644
index 0000000..50d0f97
--- /dev/null
+++ b/testing/internal/sysinfo/sysinfo.go
@@ -0,0 +1,25 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sysinfo implements high-level hardware information gathering
+// that can be used for debugging or informational purposes.
package sysinfo
+
+import (
+	"sync"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/cpu"
+)
+
+var CPUName = sync.OnceValue(func() string {
+	if name := cpu.Name(); name != "" {
+		return name
+	}
+
+	if name := osCPUInfoName(); name != "" {
+		return name
+	}
+
+	return ""
+})
diff --git a/testing/internal/sysinfo/sysinfo_test.go b/testing/internal/sysinfo/sysinfo_test.go
new file mode 100644
index 0000000..6cf8cae
--- /dev/null
+++ b/testing/internal/sysinfo/sysinfo_test.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package sysinfo_test + +import ( + "testing" + + . "github.com/CodSpeedHQ/codspeed-go/testing/internal/sysinfo" +) + +func TestCPUName(t *testing.T) { + t.Logf("CPUName: %s", CPUName()) + t.Logf("osCPUInfoName: %s", XosCPUInfoName()) +} diff --git a/testing/internal/testenv/exec.go b/testing/internal/testenv/exec.go new file mode 100644 index 0000000..7b251b6 --- /dev/null +++ b/testing/internal/testenv/exec.go @@ -0,0 +1,242 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testenv + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +// MustHaveExec checks that the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +// If not, MustHaveExec calls t.Skip with an explanation. +// +// On some platforms MustHaveExec checks for exec support by re-executing the +// current executable, which must be a binary built by 'go test'. +// We intentionally do not provide a HasExec function because of the risk of +// inappropriate recursion in TestMain functions. +// +// To check for exec support outside of a test, just try to exec the command. +// If exec is not supported, testenv.SyscallIsNotSupported will return true +// for the resulting error. +func MustHaveExec(t testing.TB) { + if err := tryExec(); err != nil { + msg := fmt.Sprintf("cannot exec subprocess on %s/%s: %v", runtime.GOOS, runtime.GOARCH, err) + if t == nil { + panic(msg) + } + t.Helper() + t.Skip("skipping test:", msg) + } +} + +var tryExec = sync.OnceValue(func() error { + switch runtime.GOOS { + case "wasip1", "js", "ios": + default: + // Assume that exec always works on non-mobile platforms and Android. + return nil + } + + // ios has an exec syscall but on real iOS devices it might return a + // permission error. In an emulated environment (such as a Corellium host) + // it might succeed, so if we need to exec we'll just have to try it and + // find out. + // + // As of 2023-04-19 wasip1 and js don't have exec syscalls at all, but we + // may as well use the same path so that this branch can be tested without + // an ios environment. + + if !testing.Testing() { + // This isn't a standard 'go test' binary, so we don't know how to + // self-exec in a way that should succeed without side effects. + // Just forget it. + return errors.New("can't probe for exec support with a non-test executable") + } + + // We know that this is a test executable. We should be able to run it with a + // no-op flag to check for overall exec support. + exe, err := exePath() + if err != nil { + return fmt.Errorf("can't probe for exec support: %w", err) + } + cmd := exec.Command(exe, "-test.list=^$") + cmd.Env = origEnv + return cmd.Run() +}) + +// Executable is a wrapper around [MustHaveExec] and [os.Executable]. +// It returns the path name for the executable that started the current process, +// or skips the test if the current system can't start new processes, +// or fails the test if the path can not be obtained. 
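+//
+// A minimal usage sketch (the test function itself is hypothetical):
+//
+//	func TestWhereAmI(t *testing.T) {
+//		exe := testenv.Executable(t)
+//		t.Logf("test binary: %s", exe)
+//	}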
+func Executable(t testing.TB) string { + MustHaveExec(t) + + exe, err := exePath() + if err != nil { + msg := fmt.Sprintf("os.Executable error: %v", err) + if t == nil { + panic(msg) + } + t.Fatal(msg) + } + return exe +} + +var exePath = sync.OnceValues(func() (string, error) { + return os.Executable() +}) + +var execPaths sync.Map // path -> error + +// MustHaveExecPath checks that the current system can start the named executable +// using os.StartProcess or (more commonly) exec.Command. +// If not, MustHaveExecPath calls t.Skip with an explanation. +func MustHaveExecPath(t testing.TB, path string) { + MustHaveExec(t) + + err, found := execPaths.Load(path) + if !found { + _, err = exec.LookPath(path) + err, _ = execPaths.LoadOrStore(path, err) + } + if err != nil { + t.Helper() + t.Skipf("skipping test: %s: %s", path, err) + } +} + +// CleanCmdEnv will fill cmd.Env with the environment, excluding certain +// variables that could modify the behavior of the Go tools such as +// GODEBUG and GOTRACEBACK. +// +// If the caller wants to set cmd.Dir, set it before calling this function, +// so PWD will be set correctly in the environment. +func CleanCmdEnv(cmd *exec.Cmd) *exec.Cmd { + if cmd.Env != nil { + panic("environment already set") + } + for _, env := range cmd.Environ() { + // Exclude GODEBUG from the environment to prevent its output + // from breaking tests that are trying to parse other command output. + if strings.HasPrefix(env, "GODEBUG=") { + continue + } + // Exclude GOTRACEBACK for the same reason. + if strings.HasPrefix(env, "GOTRACEBACK=") { + continue + } + cmd.Env = append(cmd.Env, env) + } + return cmd +} + +// CommandContext is like exec.CommandContext, but: +// - skips t if the platform does not support os/exec, +// - sends SIGQUIT (if supported by the platform) instead of SIGKILL +// in its Cancel function +// - if the test has a deadline, adds a Context timeout and WaitDelay +// for an arbitrary grace period before the test's deadline expires, +// - fails the test if the command does not complete before the test's deadline, and +// - sets a Cleanup function that verifies that the test did not leak a subprocess. +func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd { + t.Helper() + MustHaveExec(t) + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if t, ok := t.(interface { + testing.TB + Deadline() (time.Time, bool) + }); ok { + if td, ok := t.Deadline(); ok { + // Start with a minimum grace period, just long enough to consume the + // output of a reasonable program after it terminates. + gracePeriod = 100 * time.Millisecond + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + scale, err := strconv.Atoi(s) + if err != nil { + t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err) + } + gracePeriod *= time.Duration(scale) + } + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. 
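+			// For example, with 10 minutes left before the test deadline, the
+			// grace period grows from the 100ms floor to 30 seconds (10m/20).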
+ testTimeout := time.Until(td) + if gp := testTimeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up: one for the delay between the first + // termination signal being sent (via the Cancel callback when the Context + // expires) and the process being forcibly terminated (via the WaitDelay + // field), and a second one for the delay between the process being + // terminated and the test logging its output for debugging. + // + // (We want to ensure that the test process itself has enough time to + // log the output before it is also terminated.) + cmdTimeout := testTimeout - 2*gracePeriod + + if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout { + // Either ctx doesn't have a deadline, or its deadline would expire + // after (or too close before) the test has already timed out. + // Add a shorter timeout so that the test will produce useful output. + ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout) + } + } + } + + cmd := exec.CommandContext(ctx, name, args...) + cmd.Cancel = func() error { + if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded { + // The command timed out due to running too close to the test's deadline. + // There is no way the test did that intentionally — it's too close to the + // wire! — so mark it as a test failure. That way, if the test expects the + // command to fail for some other reason, it doesn't have to distinguish + // between that reason and a timeout. + t.Errorf("test timed out while running command: %v", cmd) + } else { + // The command is being terminated due to ctx being canceled, but + // apparently not due to an explicit test deadline that we added. + // Log that information in case it is useful for diagnosing a failure, + // but don't actually fail the test because of it. + t.Logf("%v: terminating command: %v", ctx.Err(), cmd) + } + return cmd.Process.Signal(Sigquit) + } + cmd.WaitDelay = gracePeriod + + t.Cleanup(func() { + if cancelCtx != nil { + cancelCtx() + } + if cmd.Process != nil && cmd.ProcessState == nil { + t.Errorf("command was started, but test did not wait for it to complete: %v", cmd) + } + }) + + return cmd +} + +// Command is like exec.Command, but applies the same changes as +// testenv.CommandContext (with a default Context). +func Command(t testing.TB, name string, args ...string) *exec.Cmd { + t.Helper() + return CommandContext(t, context.Background(), name, args...) +} diff --git a/testing/internal/testenv/noopt.go b/testing/internal/testenv/noopt.go new file mode 100644 index 0000000..ae2a3d0 --- /dev/null +++ b/testing/internal/testenv/noopt.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build noopt + +package testenv + +// OptimizationOff reports whether optimization is disabled. +func OptimizationOff() bool { + return true +} diff --git a/testing/internal/testenv/opt.go b/testing/internal/testenv/opt.go new file mode 100644 index 0000000..1bb96f7 --- /dev/null +++ b/testing/internal/testenv/opt.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !noopt + +package testenv + +// OptimizationOff reports whether optimization is disabled. 
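+// This variant is compiled when the noopt build tag is absent; building with
+// the tag (for example, go test -tags noopt) selects the copy in noopt.go.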
+func OptimizationOff() bool { + return false +} diff --git a/testing/internal/testenv/testenv.go b/testing/internal/testenv/testenv.go new file mode 100644 index 0000000..b05c04b --- /dev/null +++ b/testing/internal/testenv/testenv.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testenv provides information about what functionality +// is available in different testing environments run by the Go team. +// +// It is an internal package because these details are specific +// to the Go team's test setup (on build.golang.org) and not +// fundamental to tests in general. +package testenv + +import ( + "bytes" + "errors" + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/cfg" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goarch" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/platform" +) + +// Save the original environment during init for use in checks. A test +// binary may modify its environment before calling HasExec to change its +// behavior (such as mimicking a command-line tool), and that modified +// environment might cause environment checks to behave erratically. +var origEnv = os.Environ() + +// Builder reports the name of the builder running this test +// (for example, "linux-amd64" or "windows-386-gce"). +// If the test is not running on the build infrastructure, +// Builder returns the empty string. +func Builder() string { + return os.Getenv("GO_BUILDER_NAME") +} + +// HasGoBuild reports whether the current system can build programs with “go build” +// and then run them with os.StartProcess or exec.Command. +func HasGoBuild() bool { + if os.Getenv("GO_GCFLAGS") != "" { + // It's too much work to require every caller of the go command + // to pass along "-gcflags="+os.Getenv("GO_GCFLAGS"). + // For now, if $GO_GCFLAGS is set, report that we simply can't + // run go build. + return false + } + + return tryGoBuild() == nil +} + +var tryGoBuild = sync.OnceValue(func() error { + // To run 'go build', we need to be able to exec a 'go' command. + // We somewhat arbitrarily choose to exec 'go tool -n compile' because that + // also confirms that cmd/go can find the compiler. (Before CL 472096, + // we sometimes ended up with cmd/go installed in the test environment + // without a cmd/compile it could use to actually build things.) + goTool, err := goTool() + if err != nil { + return err + } + cmd := exec.Command(goTool, "tool", "-n", "compile") + cmd.Env = origEnv + out, err := cmd.Output() + if err != nil { + return fmt.Errorf("%v: %w", cmd, err) + } + out = bytes.TrimSpace(out) + if len(out) == 0 { + return fmt.Errorf("%v: no tool reported", cmd) + } + if _, err := exec.LookPath(string(out)); err != nil { + return err + } + + if platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) { + // We can assume that we always have a complete Go toolchain available. + // However, this platform requires a C linker to build even pure Go + // programs, including tests. Do we have one in the test environment? + // (On Android, for example, the device running the test might not have a + // C toolchain installed.) + // + // If CC is set explicitly, assume that we do. Otherwise, use 'go env CC' + // to determine which toolchain it would use by default. 
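+		// On a typical Linux host, 'go env CC' prints a compiler name such as
+		// "gcc" or "clang"; the LookPath check below then confirms that the
+		// reported toolchain is actually installed.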
+		if os.Getenv("CC") == "" {
+			cmd := exec.Command(goTool, "env", "CC")
+			cmd.Env = origEnv
+			out, err := cmd.Output()
+			if err != nil {
+				return fmt.Errorf("%v: %w", cmd, err)
+			}
+			out = bytes.TrimSpace(out)
+			if len(out) == 0 {
+				return fmt.Errorf("%v: no CC reported", cmd)
+			}
+			_, err = exec.LookPath(string(out))
+			return err
+		}
+	}
+	return nil
+})
+
+// MustHaveGoBuild checks that the current system can build programs with “go build”
+// and then run them with os.StartProcess or exec.Command.
+// If not, MustHaveGoBuild calls t.Skip with an explanation.
+func MustHaveGoBuild(t testing.TB) {
+	if os.Getenv("GO_GCFLAGS") != "" {
+		t.Helper()
+		t.Skipf("skipping test: 'go build' not compatible with setting $GO_GCFLAGS")
+	}
+	if !HasGoBuild() {
+		t.Helper()
+		t.Skipf("skipping test: 'go build' unavailable: %v", tryGoBuild())
+	}
+}
+
+// HasGoRun reports whether the current system can run programs with “go run”.
+func HasGoRun() bool {
+	// For now, having go run and having go build are the same.
+	return HasGoBuild()
+}
+
+// MustHaveGoRun checks that the current system can run programs with “go run”.
+// If not, MustHaveGoRun calls t.Skip with an explanation.
+func MustHaveGoRun(t testing.TB) {
+	if !HasGoRun() {
+		t.Helper()
+		t.Skipf("skipping test: 'go run' not available on %s/%s", runtime.GOOS, runtime.GOARCH)
+	}
+}
+
+// HasParallelism reports whether the current system can execute multiple
+// threads in parallel.
+// There is a copy of this function in cmd/dist/test.go.
+func HasParallelism() bool {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		return false
+	}
+	return true
+}
+
+// MustHaveParallelism checks that the current system can execute multiple
+// threads in parallel. If not, MustHaveParallelism calls t.Skip with an explanation.
+func MustHaveParallelism(t testing.TB) {
+	if !HasParallelism() {
+		t.Helper()
+		t.Skipf("skipping test: no parallelism available on %s/%s", runtime.GOOS, runtime.GOARCH)
+	}
+}
+
+// GoToolPath reports the path to the Go tool.
+// It is a convenience wrapper around GoTool.
+// If the tool is unavailable, GoToolPath calls t.Skip.
+// If the tool should be available and isn't, GoToolPath calls t.Fatal.
+func GoToolPath(t testing.TB) string {
+	MustHaveGoBuild(t)
+	path, err := GoTool()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add all environment variables that affect the Go command to test metadata.
+	// Cached test results will be invalidated when these variables change.
+	// See golang.org/issue/32285.
+	for _, envVar := range strings.Fields(cfg.KnownEnv) {
+		os.Getenv(envVar)
+	}
+	return path
+}
+
+var findGOROOT = sync.OnceValues(func() (path string, err error) {
+	if path := runtime.GOROOT(); path != "" {
+		// If runtime.GOROOT() is non-empty, assume that it is valid.
+		//
+		// (It might not be: for example, the user may have explicitly set GOROOT
+		// to the wrong directory. But this case is rare, and if that happens the
+		// user can fix what they broke.)
+		return path, nil
+	}
+
+	// runtime.GOROOT doesn't know where GOROOT is (perhaps because the test
+	// binary was built with -trimpath).
+	//
+	// Since this is internal/testenv, we can cheat and assume that the caller
+	// is a test of some package in a subdirectory of GOROOT/src. ('go test'
+	// runs the test in the directory containing the package under test.) That
+	// means that if we start walking up the tree, we should eventually find
+	// GOROOT/src/go.mod, and we can report the parent directory of that.
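+	//
+	// For example, a test running in GOROOT/src/internal/testenv walks up to
+	// GOROOT/src/internal, then to GOROOT/src, whose go.mod declares
+	// "module std"; GOROOT is then reported as the parent of that directory.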
+ // + // Notably, this works even if we can't run 'go env GOROOT' as a + // subprocess. + + cwd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("finding GOROOT: %w", err) + } + + dir := cwd + for { + parent := filepath.Dir(dir) + if parent == dir { + // dir is either "." or only a volume name. + return "", fmt.Errorf("failed to locate GOROOT/src in any parent directory") + } + + if base := filepath.Base(dir); base != "src" { + dir = parent + continue // dir cannot be GOROOT/src if it doesn't end in "src". + } + + b, err := os.ReadFile(filepath.Join(dir, "go.mod")) + if err != nil { + if os.IsNotExist(err) { + dir = parent + continue + } + return "", fmt.Errorf("finding GOROOT: %w", err) + } + goMod := string(b) + + for goMod != "" { + var line string + line, goMod, _ = strings.Cut(goMod, "\n") + fields := strings.Fields(line) + if len(fields) >= 2 && fields[0] == "module" && fields[1] == "std" { + // Found "module std", which is the module declaration in GOROOT/src! + return parent, nil + } + } + } +}) + +// GOROOT reports the path to the directory containing the root of the Go +// project source tree. This is normally equivalent to runtime.GOROOT, but +// works even if the test binary was built with -trimpath and cannot exec +// 'go env GOROOT'. +// +// If GOROOT cannot be found, GOROOT skips t if t is non-nil, +// or panics otherwise. +func GOROOT(t testing.TB) string { + path, err := findGOROOT() + if err != nil { + if t == nil { + panic(err) + } + t.Helper() + t.Skip(err) + } + return path +} + +// GoTool reports the path to the Go tool. +func GoTool() (string, error) { + if !HasGoBuild() { + return "", errors.New("platform cannot run go tool") + } + return goTool() +} + +var goTool = sync.OnceValues(func() (string, error) { + return exec.LookPath("go") +}) + +// MustHaveSource checks that the entire source tree is available under GOROOT. +// If not, it calls t.Skip with an explanation. +func MustHaveSource(t testing.TB) { + switch runtime.GOOS { + case "ios": + t.Helper() + t.Skip("skipping test: no source tree on " + runtime.GOOS) + } +} + +// HasExternalNetwork reports whether the current system can use +// external (non-localhost) networks. +func HasExternalNetwork() bool { + return !testing.Short() && runtime.GOOS != "js" && runtime.GOOS != "wasip1" +} + +// MustHaveExternalNetwork checks that the current system can use +// external (non-localhost) networks. +// If not, MustHaveExternalNetwork calls t.Skip with an explanation. +func MustHaveExternalNetwork(t testing.TB) { + if runtime.GOOS == "js" || runtime.GOOS == "wasip1" { + t.Helper() + t.Skipf("skipping test: no external network on %s", runtime.GOOS) + } + if testing.Short() { + t.Helper() + t.Skipf("skipping test: no external network in -short mode") + } +} + +// HasCGO reports whether the current system can use cgo. +func HasCGO() bool { + return hasCgo() +} + +var hasCgo = sync.OnceValue(func() bool { + goTool, err := goTool() + if err != nil { + return false + } + cmd := exec.Command(goTool, "env", "CGO_ENABLED") + cmd.Env = origEnv + out, err := cmd.Output() + if err != nil { + panic(fmt.Sprintf("%v: %v", cmd, out)) + } + ok, err := strconv.ParseBool(string(bytes.TrimSpace(out))) + if err != nil { + panic(fmt.Sprintf("%v: non-boolean output %q", cmd, out)) + } + return ok +}) + +// MustHaveCGO calls t.Skip if cgo is not available. 
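+//
+// A typical guard at the top of a cgo-dependent test (the body here is a
+// hypothetical sketch):
+//
+//	func TestNeedsCgo(t *testing.T) {
+//		testenv.MustHaveCGO(t)
+//		// ... exercise code that links against C ...
+//	}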
+func MustHaveCGO(t testing.TB) { + if !HasCGO() { + t.Helper() + t.Skipf("skipping test: no cgo") + } +} + +// CanInternalLink reports whether the current system can link programs with +// internal linking. +func CanInternalLink(withCgo bool) bool { + return !platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, withCgo) +} + +// MustInternalLink checks that the current system can link programs with internal +// linking. +// If not, MustInternalLink calls t.Skip with an explanation. +func MustInternalLink(t testing.TB, withCgo bool) { + if !CanInternalLink(withCgo) { + t.Helper() + if withCgo && CanInternalLink(false) { + t.Skipf("skipping test: internal linking on %s/%s is not supported with cgo", runtime.GOOS, runtime.GOARCH) + } + t.Skipf("skipping test: internal linking on %s/%s is not supported", runtime.GOOS, runtime.GOARCH) + } +} + +// MustInternalLinkPIE checks whether the current system can link PIE binary using +// internal linking. +// If not, MustInternalLinkPIE calls t.Skip with an explanation. +func MustInternalLinkPIE(t testing.TB) { + if !platform.InternalLinkPIESupported(runtime.GOOS, runtime.GOARCH) { + t.Helper() + t.Skipf("skipping test: internal linking for buildmode=pie on %s/%s is not supported", runtime.GOOS, runtime.GOARCH) + } +} + +// MustHaveBuildMode reports whether the current system can build programs in +// the given build mode. +// If not, MustHaveBuildMode calls t.Skip with an explanation. +func MustHaveBuildMode(t testing.TB, buildmode string) { + if !platform.BuildModeSupported(runtime.Compiler, buildmode, runtime.GOOS, runtime.GOARCH) { + t.Helper() + t.Skipf("skipping test: build mode %s on %s/%s is not supported by the %s compiler", buildmode, runtime.GOOS, runtime.GOARCH, runtime.Compiler) + } +} + +// HasSymlink reports whether the current system can use os.Symlink. +func HasSymlink() bool { + ok, _ := hasSymlink() + return ok +} + +// MustHaveSymlink reports whether the current system can use os.Symlink. +// If not, MustHaveSymlink calls t.Skip with an explanation. +func MustHaveSymlink(t testing.TB) { + ok, reason := hasSymlink() + if !ok { + t.Helper() + t.Skipf("skipping test: cannot make symlinks on %s/%s: %s", runtime.GOOS, runtime.GOARCH, reason) + } +} + +// HasLink reports whether the current system can use os.Link. +func HasLink() bool { + // From Android release M (Marshmallow), hard linking files is blocked + // and an attempt to call link() on a file will return EACCES. + // - https://code.google.com/p/android-developer-preview/issues/detail?id=3150 + return runtime.GOOS != "plan9" && runtime.GOOS != "android" +} + +// MustHaveLink reports whether the current system can use os.Link. +// If not, MustHaveLink calls t.Skip with an explanation. +func MustHaveLink(t testing.TB) { + if !HasLink() { + t.Helper() + t.Skipf("skipping test: hardlinks are not supported on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +var flaky = flag.Bool("flaky", false, "run known-flaky tests too") + +func SkipFlaky(t testing.TB, issue int) { + if !*flaky { + t.Helper() + t.Skipf("skipping known flaky test without the -flaky flag; see golang.org/issue/%d", issue) + } +} + +func SkipFlakyNet(t testing.TB) { + if v, _ := strconv.ParseBool(os.Getenv("GO_BUILDER_FLAKY_NET")); v { + t.Helper() + t.Skip("skipping test on builder known to have frequent network failures") + } +} + +// CPUIsSlow reports whether the CPU running the test is suspected to be slow. 
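+// The answer is derived from GOARCH alone, so it is a coarse heuristic rather
+// than a measurement of the actual host.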
+func CPUIsSlow() bool { + switch runtime.GOARCH { + case "arm", "mips", "mipsle", "mips64", "mips64le", "wasm": + return true + } + return false +} + +// SkipIfShortAndSlow skips t if -short is set and the CPU running the test is +// suspected to be slow. +// +// (This is useful for CPU-intensive tests that otherwise complete quickly.) +func SkipIfShortAndSlow(t testing.TB) { + if testing.Short() && CPUIsSlow() { + t.Helper() + t.Skipf("skipping test in -short mode on %s", runtime.GOARCH) + } +} + +// SkipIfOptimizationOff skips t if optimization is disabled. +func SkipIfOptimizationOff(t testing.TB) { + if OptimizationOff() { + t.Helper() + t.Skip("skipping test with optimization disabled") + } +} + +// WriteImportcfg writes an importcfg file used by the compiler or linker to +// dstPath containing entries for the file mappings in packageFiles, as well +// as for the packages transitively imported by the package(s) in pkgs. +// +// pkgs may include any package pattern that is valid to pass to 'go list', +// so it may also be a list of Go source files all in the same directory. +func WriteImportcfg(t testing.TB, dstPath string, packageFiles map[string]string, pkgs ...string) { + t.Helper() + + icfg := new(bytes.Buffer) + icfg.WriteString("# import config\n") + for k, v := range packageFiles { + fmt.Fprintf(icfg, "packagefile %s=%s\n", k, v) + } + + if len(pkgs) > 0 { + // Use 'go list' to resolve any missing packages and rewrite the import map. + cmd := Command(t, GoToolPath(t), "list", "-export", "-deps", "-f", `{{if ne .ImportPath "command-line-arguments"}}{{if .Export}}{{.ImportPath}}={{.Export}}{{end}}{{end}}`) + cmd.Args = append(cmd.Args, pkgs...) + cmd.Stderr = new(strings.Builder) + out, err := cmd.Output() + if err != nil { + t.Fatalf("%v: %v\n%s", cmd, err, cmd.Stderr) + } + + for _, line := range strings.Split(string(out), "\n") { + if line == "" { + continue + } + importPath, export, ok := strings.Cut(line, "=") + if !ok { + t.Fatalf("invalid line in output from %v:\n%s", cmd, line) + } + if packageFiles[importPath] == "" { + fmt.Fprintf(icfg, "packagefile %s=%s\n", importPath, export) + } + } + } + + if err := os.WriteFile(dstPath, icfg.Bytes(), 0666); err != nil { + t.Fatal(err) + } +} + +// SyscallIsNotSupported reports whether err may indicate that a system call is +// not supported by the current platform or execution environment. +func SyscallIsNotSupported(err error) bool { + return syscallIsNotSupported(err) +} + +// ParallelOn64Bit calls t.Parallel() unless there is a case that cannot be parallel. +// This function should be used when it is necessary to avoid t.Parallel on +// 32-bit machines, typically because the test uses lots of memory. +func ParallelOn64Bit(t *testing.T) { + if goarch.PtrSize == 4 { + return + } + t.Parallel() +} + +// CPUProfilingBroken returns true if CPU profiling has known issues on this +// platform. +func CPUProfilingBroken() bool { + switch runtime.GOOS { + case "plan9": + // Profiling unimplemented. + return true + case "aix": + // See https://golang.org/issue/45170. + return true + case "ios", "dragonfly", "netbsd", "illumos", "solaris": + // See https://golang.org/issue/13841. + return true + case "openbsd": + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + // See https://golang.org/issue/13841. 
+ return true + } + } + + return false +} diff --git a/testing/internal/testenv/testenv_notunix.go b/testing/internal/testenv/testenv_notunix.go new file mode 100644 index 0000000..a7df5f5 --- /dev/null +++ b/testing/internal/testenv/testenv_notunix.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || plan9 || (js && wasm) || wasip1 + +package testenv + +import ( + "errors" + "io/fs" + "os" +) + +// Sigquit is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var Sigquit = os.Kill + +func syscallIsNotSupported(err error) bool { + return errors.Is(err, fs.ErrPermission) || errors.Is(err, errors.ErrUnsupported) +} diff --git a/testing/internal/testenv/testenv_notwin.go b/testing/internal/testenv/testenv_notwin.go new file mode 100644 index 0000000..9dddea9 --- /dev/null +++ b/testing/internal/testenv/testenv_notwin.go @@ -0,0 +1,47 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package testenv + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "sync" +) + +var hasSymlink = sync.OnceValues(func() (ok bool, reason string) { + switch runtime.GOOS { + case "plan9": + return false, "" + case "android", "wasip1": + // For wasip1, some runtimes forbid absolute symlinks, + // or symlinks that escape the current working directory. + // Perform a simple test to see whether the runtime + // supports symlinks or not. If we get a permission + // error, the runtime does not support symlinks. + dir, err := os.MkdirTemp("", "") + if err != nil { + return false, "" + } + defer func() { + _ = os.RemoveAll(dir) + }() + fpath := filepath.Join(dir, "testfile.txt") + if err := os.WriteFile(fpath, nil, 0644); err != nil { + return false, "" + } + if err := os.Symlink(fpath, filepath.Join(dir, "testlink")); err != nil { + if SyscallIsNotSupported(err) { + return false, fmt.Sprintf("symlinks unsupported: %s", err.Error()) + } + return false, "" + } + } + + return true, "" +}) diff --git a/testing/internal/testenv/testenv_test.go b/testing/internal/testenv/testenv_test.go new file mode 100644 index 0000000..7a826a6 --- /dev/null +++ b/testing/internal/testenv/testenv_test.go @@ -0,0 +1,209 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testenv_test + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/platform" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +func TestGoToolLocation(t *testing.T) { + testenv.MustHaveGoBuild(t) + + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + + // Tests are defined to run within their package source directory, + // and this package's source directory is $GOROOT/src/internal/testenv. + // The 'go' command is installed at $GOROOT/bin/go, so if the environment + // is correct then testenv.GoTool() should be identical to ../../../bin/go. 
+ + relWant := "../../../bin/go" + exeSuffix + absWant, err := filepath.Abs(relWant) + if err != nil { + t.Fatal(err) + } + + wantInfo, err := os.Stat(absWant) + if err != nil { + t.Fatal(err) + } + t.Logf("found go tool at %q (%q)", relWant, absWant) + + goTool, err := testenv.GoTool() + if err != nil { + t.Fatalf("testenv.GoTool(): %v", err) + } + t.Logf("testenv.GoTool() = %q", goTool) + + gotInfo, err := os.Stat(goTool) + if err != nil { + t.Fatal(err) + } + if !os.SameFile(wantInfo, gotInfo) { + t.Fatalf("%q is not the same file as %q", absWant, goTool) + } +} + +func TestHasGoBuild(t *testing.T) { + if !testenv.HasGoBuild() { + switch runtime.GOOS { + case "js", "wasip1": + // No exec syscall, so these shouldn't be able to 'go build'. + t.Logf("HasGoBuild is false on %s", runtime.GOOS) + return + } + + b := testenv.Builder() + if b == "" { + // We shouldn't make assumptions about what kind of sandbox or build + // environment external Go users may be running in. + t.Skipf("skipping: 'go build' unavailable") + } + + // Since we control the Go builders, we know which ones ought + // to be able to run 'go build'. Check that they can. + // + // (Note that we don't verify that any builders *can't* run 'go build'. + // If a builder starts running 'go build' tests when it shouldn't, + // we will presumably find out about it when those tests fail.) + switch runtime.GOOS { + case "ios": + if isCorelliumBuilder(b) { + // The corellium environment is self-hosting, so it should be able + // to build even though real "ios" devices can't exec. + } else { + // The usual iOS sandbox does not allow the app to start another + // process. If we add builders on stock iOS devices, they presumably + // will not be able to exec, so we may as well allow that now. + t.Logf("HasGoBuild is false on %s", b) + return + } + case "android": + if isEmulatedBuilder(b) && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) { + // As of 2023-05-02, the test environment on the emulated builders is + // missing a C linker. + t.Logf("HasGoBuild is false on %s", b) + return + } + } + + if strings.Contains(b, "-noopt") { + // The -noopt builder sets GO_GCFLAGS, which causes tests of 'go build' to + // be skipped. + t.Logf("HasGoBuild is false on %s", b) + return + } + + t.Fatalf("HasGoBuild unexpectedly false on %s", b) + } + + t.Logf("HasGoBuild is true; checking consistency with other functions") + + hasExec := false + hasExecGo := false + t.Run("MustHaveExec", func(t *testing.T) { + testenv.MustHaveExec(t) + hasExec = true + }) + t.Run("MustHaveExecPath", func(t *testing.T) { + testenv.MustHaveExecPath(t, "go") + hasExecGo = true + }) + if !hasExec { + t.Errorf(`MustHaveExec(t) skipped unexpectedly`) + } + if !hasExecGo { + t.Errorf(`MustHaveExecPath(t, "go") skipped unexpectedly`) + } + + dir := t.TempDir() + mainGo := filepath.Join(dir, "main.go") + if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil { + t.Fatal(err) + } + cmd := testenv.Command(t, "go", "build", "-o", os.DevNull, mainGo) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%v: %v\n%s", cmd, err, out) + } +} + +func TestMustHaveExec(t *testing.T) { + hasExec := false + t.Run("MustHaveExec", func(t *testing.T) { + testenv.MustHaveExec(t) + t.Logf("MustHaveExec did not skip") + hasExec = true + }) + + switch runtime.GOOS { + case "js", "wasip1": + if hasExec { + // js and wasip1 lack an “exec” syscall. 
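+			// MustHaveExec should therefore have skipped the subtest;
+			// reaching this branch means it did not.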
+			t.Errorf("expected MustHaveExec to skip on %v", runtime.GOOS)
+		}
+	case "ios":
+		if b := testenv.Builder(); isCorelliumBuilder(b) && !hasExec {
+			// Most ios environments can't exec, but the corellium builder can.
+			t.Errorf("expected MustHaveExec not to skip on %v", b)
+		}
+	default:
+		if b := testenv.Builder(); b != "" && !hasExec {
+			t.Errorf("expected MustHaveExec not to skip on %v", b)
+		}
+	}
+}
+
+func TestCleanCmdEnvPWD(t *testing.T) {
+	// Test that CleanCmdEnv sets PWD if cmd.Dir is set.
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("PWD is not used on %s", runtime.GOOS)
+	}
+	dir := t.TempDir()
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "help")
+	cmd.Dir = dir
+	cmd = testenv.CleanCmdEnv(cmd)
+
+	for _, env := range cmd.Env {
+		if strings.HasPrefix(env, "PWD=") {
+			pwd := strings.TrimPrefix(env, "PWD=")
+			if pwd != dir {
+				t.Errorf("unexpected PWD: want %s, got %s", dir, pwd)
+			}
+			return
+		}
+	}
+	t.Error("PWD not set in cmd.Env")
+}
+
+func isCorelliumBuilder(builderName string) bool {
+	// Support both the old infra's builder names and the LUCI builder names.
+	// The former's names are ad-hoc so we could maintain this invariant on
+	// the builder side. The latter's names are structured, and "corellium" will
+	// appear as a "host" suffix after the GOOS and GOARCH, which always begin
+	// with an underscore.
+	return strings.HasSuffix(builderName, "-corellium") || strings.Contains(builderName, "_corellium")
+}
+
+func isEmulatedBuilder(builderName string) bool {
+	// Support both the old infra's builder names and the LUCI builder names.
+	// The former's names are ad-hoc so we could maintain this invariant on
+	// the builder side. The latter's names are structured, and the signifier
+	// of emulation "emu" will appear as a "host" suffix after the GOOS and
+	// GOARCH because it modifies the run environment in such a way that
+	// the target GOOS and GOARCH may not match the host. This suffix always
+	// begins with an underscore.
+	return strings.HasSuffix(builderName, "-emu") || strings.Contains(builderName, "_emu")
+}
diff --git a/testing/internal/testenv/testenv_unix.go b/testing/internal/testenv/testenv_unix.go
new file mode 100644
index 0000000..a629078
--- /dev/null
+++ b/testing/internal/testenv/testenv_unix.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package testenv
+
+import (
+	"errors"
+	"io/fs"
+	"syscall"
+)
+
+// Sigquit is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var Sigquit = syscall.SIGQUIT
+
+func syscallIsNotSupported(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		switch errno {
+		case syscall.EPERM, syscall.EROFS:
+			// User lacks permission: either the call requires root permission and the
+			// user is not root, or the call is denied by a container security policy.
+			return true
+		case syscall.EINVAL:
+			// Some containers return EINVAL instead of EPERM if a system call is
+			// denied by security policy.
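+			// Treat it like EPERM: the environment forbids the call.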
+ return true + } + } + + if errors.Is(err, fs.ErrPermission) || errors.Is(err, errors.ErrUnsupported) { + return true + } + + return false +} diff --git a/testing/internal/testenv/testenv_windows.go b/testing/internal/testenv/testenv_windows.go new file mode 100644 index 0000000..eed53cd --- /dev/null +++ b/testing/internal/testenv/testenv_windows.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testenv + +import ( + "errors" + "os" + "path/filepath" + "sync" + "syscall" +) + +var hasSymlink = sync.OnceValues(func() (bool, string) { + tmpdir, err := os.MkdirTemp("", "symtest") + if err != nil { + panic("failed to create temp directory: " + err.Error()) + } + defer os.RemoveAll(tmpdir) + + err = os.Symlink("target", filepath.Join(tmpdir, "symlink")) + switch { + case err == nil: + return true, "" + case errors.Is(err, syscall.EWINDOWS): + return false, ": symlinks are not supported on your version of Windows" + case errors.Is(err, syscall.ERROR_PRIVILEGE_NOT_HELD): + return false, ": you don't have enough privileges to create symlinks" + } + return false, "" +}) diff --git a/testing/internal/testlog/exit.go b/testing/internal/testlog/exit.go new file mode 100644 index 0000000..b985c6b --- /dev/null +++ b/testing/internal/testlog/exit.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testlog + +import ( + "sync" + _ "unsafe" // for linkname +) + +// PanicOnExit0 reports whether to panic on a call to os.Exit(0). +// This is in the testlog package because, like other definitions in +// package testlog, it is a hook between the testing package and the +// os package. This is used to ensure that an early call to os.Exit(0) +// does not cause a test to pass. +func PanicOnExit0() bool { + panicOnExit0.mu.Lock() + defer panicOnExit0.mu.Unlock() + return panicOnExit0.val +} + +// panicOnExit0 is the flag used for PanicOnExit0. This uses a lock +// because the value can be cleared via a timer call that may race +// with calls to os.Exit +var panicOnExit0 struct { + mu sync.Mutex + val bool +} + +// SetPanicOnExit0 sets panicOnExit0 to v. +// +// SetPanicOnExit0 should be an internal detail, +// but alternate implementations of go test in other +// build systems may need to access it using linkname. +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname SetPanicOnExit0 +func SetPanicOnExit0(v bool) { + panicOnExit0.mu.Lock() + defer panicOnExit0.mu.Unlock() + panicOnExit0.val = v +} diff --git a/testing/internal/testlog/log.go b/testing/internal/testlog/log.go new file mode 100644 index 0000000..d8b9dcf --- /dev/null +++ b/testing/internal/testlog/log.go @@ -0,0 +1,68 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testlog provides a back-channel communication path +// between tests and package os, so that cmd/go can see which +// environment variables and files a test consults. +package testlog + +import "sync/atomic" + +// Interface is the interface required of test loggers. +// The os package will invoke the interface's methods to indicate that +// it is inspecting the given environment variables or files. 
+// Multiple goroutines may call these methods simultaneously.
+type Interface interface {
+	Getenv(key string)
+	Stat(file string)
+	Open(file string)
+	Chdir(dir string)
+}
+
+// logger is the current logger Interface.
+// We use an atomic.Pointer in case test startup
+// is racing with goroutines started during init.
+// That must not cause a race detector failure,
+// although it will still result in limited visibility
+// into exactly what those goroutines do.
+var logger atomic.Pointer[Interface]
+
+// SetLogger sets the test logger implementation for the current process.
+// It must be called only once, at process startup.
+func SetLogger(impl Interface) {
+	if !logger.CompareAndSwap(nil, &impl) {
+		panic("testlog: SetLogger must be called only once")
+	}
+}
+
+// Logger returns the current test logger implementation.
+// It returns nil if there is no logger.
+func Logger() Interface {
+	impl := logger.Load()
+	if impl == nil {
+		return nil
+	}
+	return *impl
+}
+
+// Getenv calls Logger().Getenv, if a logger has been set.
+func Getenv(name string) {
+	if log := Logger(); log != nil {
+		log.Getenv(name)
+	}
+}
+
+// Open calls Logger().Open, if a logger has been set.
+func Open(name string) {
+	if log := Logger(); log != nil {
+		log.Open(name)
+	}
+}
+
+// Stat calls Logger().Stat, if a logger has been set.
+func Stat(name string) {
+	if log := Logger(); log != nil {
+		log.Stat(name)
+	}
+}
diff --git a/testing/internal/txtar/archive.go b/testing/internal/txtar/archive.go
new file mode 100644
index 0000000..fd95f1e
--- /dev/null
+++ b/testing/internal/txtar/archive.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package txtar implements a trivial text-based file archive format.
+//
+// The goals for the format are:
+//
+//   - be trivial enough to create and edit by hand.
+//   - be able to store trees of text files describing go command test cases.
+//   - diff nicely in git history and code reviews.
+//
+// Non-goals include being a completely general archive format,
+// storing binary data, storing file modes, storing special files like
+// symbolic links, and so on.
+//
+// # Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+	Comment []byte
+	Files   []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+	Name string // name of file ("foo/bar.txt")
+	Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed: +// a.Comment and all a.File[i].Data contain no file marker lines, +// and all a.File[i].Name is non-empty. +func Format(a *Archive) []byte { + var buf bytes.Buffer + buf.Write(fixNL(a.Comment)) + for _, f := range a.Files { + fmt.Fprintf(&buf, "-- %s --\n", f.Name) + buf.Write(fixNL(f.Data)) + } + return buf.Bytes() +} + +// ParseFile parses the named file as an archive. +func ParseFile(file string) (*Archive, error) { + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + return Parse(data), nil +} + +// Parse parses the serialized form of an Archive. +// The returned Archive holds slices of data. +func Parse(data []byte) *Archive { + a := new(Archive) + var name string + a.Comment, name, data = findFileMarker(data) + for name != "" { + f := File{name, nil} + f.Data, name, data = findFileMarker(data) + a.Files = append(a.Files, f) + } + return a +} + +var ( + newlineMarker = []byte("\n-- ") + marker = []byte("-- ") + markerEnd = []byte(" --") +) + +// findFileMarker finds the next file marker in data, +// extracts the file name, and returns the data before the marker, +// the file name, and the data after the marker. +// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil. +func findFileMarker(data []byte) (before []byte, name string, after []byte) { + var i int + for { + if name, after = isMarker(data[i:]); name != "" { + return data[:i], name, after + } + j := bytes.Index(data[i:], newlineMarker) + if j < 0 { + return fixNL(data), "", nil + } + i += j + 1 // positioned at start of new possible marker + } +} + +// isMarker checks whether data begins with a file marker line. +// If so, it returns the name from the line and the data after the line. +// Otherwise it returns name == "" with an unspecified after. +func isMarker(data []byte) (name string, after []byte) { + if !bytes.HasPrefix(data, marker) { + return "", nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + data, after = data[:i], data[i+1:] + } + if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) { + return "", nil + } + return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after +} + +// If data is empty or ends in \n, fixNL returns data. +// Otherwise fixNL returns a new slice consisting of data with a final \n added. 
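+//
+// For example (illustrative only, not in the original source):
+//
+//	fixNL([]byte("a"))   // -> "a\n" (newly allocated slice)
+//	fixNL([]byte("a\n")) // -> "a\n" (returned unchanged)
+//	fixNL(nil)           // -> nil   (returned unchanged)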
+func fixNL(data []byte) []byte { + if len(data) == 0 || data[len(data)-1] == '\n' { + return data + } + d := make([]byte, len(data)+1) + copy(d, data) + d[len(data)] = '\n' + return d +} diff --git a/testing/internal_race.patch b/testing/internal_race.patch new file mode 100644 index 0000000..6f32c10 --- /dev/null +++ b/testing/internal_race.patch @@ -0,0 +1,69 @@ +diff --git a/internal/race/norace.go b/internal/race/norace.go +index fc6e13f..346564b 100644 +--- a/internal/race/norace.go ++++ b/internal/race/norace.go +@@ -8,8 +8,6 @@ package race + + import ( + "unsafe" +- +- "github.com/CodSpeedHQ/codspeed-go/testing/internal/abi" + ) + + const Enabled = false +@@ -35,8 +33,8 @@ func Read(addr unsafe.Pointer) { + func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) { + } + +-func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +-} ++// func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { ++// } + + func Write(addr unsafe.Pointer) { + } +@@ -44,8 +42,8 @@ func Write(addr unsafe.Pointer) { + func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) { + } + +-func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { +-} ++// func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { ++// } + + func ReadRange(addr unsafe.Pointer, len int) { + } +diff --git a/internal/race/race.go b/internal/race/race.go +index d345009..ef54ea5 100644 +--- a/internal/race/race.go ++++ b/internal/race/race.go +@@ -7,7 +7,6 @@ + package race + + import ( +- "github.com/CodSpeedHQ/codspeed-go/testing/internal/abi" + "unsafe" + ) + +@@ -36,8 +35,8 @@ func Read(addr unsafe.Pointer) + //go:linkname ReadPC + func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) + +-//go:linkname ReadObjectPC +-func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) ++// //go:linkname ReadObjectPC ++// func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + + //go:linkname Write + func Write(addr unsafe.Pointer) +@@ -45,8 +44,8 @@ func Write(addr unsafe.Pointer) + //go:linkname WritePC + func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) + +-//go:linkname WriteObjectPC +-func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) ++// //go:linkname WriteObjectPC ++// func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) + + //go:linkname ReadRange + func ReadRange(addr unsafe.Pointer, len int) diff --git a/testing/testing/allocs.go b/testing/testing/allocs.go new file mode 100644 index 0000000..1eeb2d4 --- /dev/null +++ b/testing/testing/allocs.go @@ -0,0 +1,45 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "runtime" +) + +// AllocsPerRun returns the average number of allocations during calls to f. +// Although the return value has type float64, it will always be an integral value. +// +// To compute the number of allocations, the function will first be run once as +// a warm-up. The average number of allocations over the specified number of +// runs will then be measured and returned. +// +// AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore +// it before returning. 
+func AllocsPerRun(runs int, f func()) (avg float64) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + + // Warm up the function + f() + + // Measure the starting statistics + var memstats runtime.MemStats + runtime.ReadMemStats(&memstats) + mallocs := 0 - memstats.Mallocs + + // Run the function the specified number of times + for i := 0; i < runs; i++ { + f() + } + + // Read the final statistics + runtime.ReadMemStats(&memstats) + mallocs += memstats.Mallocs + + // Average the mallocs over the runs (not counting the warm-up). + // We are forced to return a float64 because the API is silly, but do + // the division as integers so we can ask if AllocsPerRun()==1 + // instead of AllocsPerRun()<2. + return float64(mallocs / uint64(runs)) +} diff --git a/testing/testing/allocs_test.go b/testing/testing/allocs_test.go new file mode 100644 index 0000000..bbd3ae7 --- /dev/null +++ b/testing/testing/allocs_test.go @@ -0,0 +1,29 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import "testing" + +var global any + +var allocsPerRunTests = []struct { + name string + fn func() + allocs float64 +}{ + {"alloc *byte", func() { global = new(*byte) }, 1}, + {"alloc complex128", func() { global = new(complex128) }, 1}, + {"alloc float64", func() { global = new(float64) }, 1}, + {"alloc int32", func() { global = new(int32) }, 1}, + {"alloc byte", func() { global = new(byte) }, 1}, +} + +func TestAllocsPerRun(t *testing.T) { + for _, tt := range allocsPerRunTests { + if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs { + t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs) + } + } +} diff --git a/testing/testing/benchmark.go b/testing/testing/benchmark.go new file mode 100644 index 0000000..dc69f76 --- /dev/null +++ b/testing/testing/benchmark.go @@ -0,0 +1,1118 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
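+//
+// This file is adapted from the upstream Go testing package's benchmark.go.
+// The CodSpeed-specific additions are the codspeed* bookkeeping fields on B
+// and the block marked CODSPEED in processBench below.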
+ +package testing + +import ( + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "flag" + "fmt" + "io" + "math" + "os" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/sysinfo" +) + +func initBenchmarkFlags() { + matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`") + benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks") + flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx") +} + +var ( + matchBenchmarks *string + benchmarkMemory *bool + + benchTime = durationOrCountFlag{d: 1 * time.Second} // changed during test of testing package +) + +type durationOrCountFlag struct { + d time.Duration + n int + allowZero bool +} + +func (f *durationOrCountFlag) String() string { + if f.n > 0 { + return fmt.Sprintf("%dx", f.n) + } + return f.d.String() +} + +func (f *durationOrCountFlag) Set(s string) error { + if strings.HasSuffix(s, "x") { + n, err := strconv.ParseInt(s[:len(s)-1], 10, 0) + if err != nil || n < 0 || (!f.allowZero && n == 0) { + return fmt.Errorf("invalid count") + } + *f = durationOrCountFlag{n: int(n)} + return nil + } + d, err := time.ParseDuration(s) + if err != nil || d < 0 || (!f.allowZero && d == 0) { + return fmt.Errorf("invalid duration") + } + *f = durationOrCountFlag{d: d} + return nil +} + +// Global lock to ensure only one benchmark runs at a time. +var benchmarkLock sync.Mutex + +// Used for every benchmark for measuring memory. +var memStats runtime.MemStats + +// InternalBenchmark is an internal type but exported because it is cross-package; +// it is part of the implementation of the "go test" command. +type InternalBenchmark struct { + Name string + F func(b *B) +} + +// B is a type passed to [Benchmark] functions to manage benchmark +// timing and control the number of iterations. +// +// A benchmark ends when its Benchmark function returns or calls any of the methods +// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called +// only from the goroutine running the Benchmark function. +// The other reporting methods, such as the variations of Log and Error, +// may be called simultaneously from multiple goroutines. +// +// Like in tests, benchmark logs are accumulated during execution +// and dumped to standard output when done. Unlike in tests, benchmark logs +// are always printed, so as not to hide output whose existence may be +// affecting benchmark results. +type B struct { + common + importPath string // import path of the package containing the benchmark + bstate *benchState + N int + previousN int // number of iterations in the previous run + previousDuration time.Duration // total duration of the previous run + benchFunc func(b *B) + benchTime durationOrCountFlag + bytes int64 + missingBytes bool // one of the subbenchmarks does not have bytes set. + timerOn bool + showAllocResult bool + result BenchmarkResult + parallelism int // RunParallel creates parallelism*GOMAXPROCS goroutines + // The initial states of memStats.Mallocs and memStats.TotalAlloc. + startAllocs uint64 + startBytes uint64 + // The net total of this test after being run. + netAllocs uint64 + netBytes uint64 + // Extra metrics collected by ReportMetric. + extra map[string]float64 + + // loop tracks the state of B.Loop + loop struct { + // n is the target number of iterations. 
It gets bumped up as we go.
+		// When the benchmark loop is done, we commit this to b.N so users can
+		// do reporting based on it, but we avoid exposing it until then.
+		n uint64
+		// i is the current Loop iteration. It's strictly monotonically
+		// increasing toward n.
+		//
+		// The high bit is used to poison the Loop fast path and fall back to
+		// the slow path.
+		i uint64
+
+		done bool // set when B.Loop returns false
+	}
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also be used to resume timing after
+// a call to [B.StopTimer].
+func (b *B) StartTimer() {
+	if !b.timerOn {
+		// runtime.ReadMemStats(&memStats)
+		// b.startAllocs = memStats.Mallocs
+		// b.startBytes = memStats.TotalAlloc
+		b.start = highPrecisionTimeNow()
+		b.timerOn = true
+		// b.loop.i &^= loopPoisonTimer
+	}
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing steps that you don't want to measure.
+func (b *B) StopTimer() {
+	if b.timerOn {
+		b.codspeedTimePerRoundNs = append(b.codspeedTimePerRoundNs, highPrecisionTimeSince(b.start))
+		b.duration += highPrecisionTimeSince(b.start)
+		// runtime.ReadMemStats(&memStats)
+		// b.netAllocs += memStats.Mallocs - b.startAllocs
+		// b.netBytes += memStats.TotalAlloc - b.startBytes
+		b.timerOn = false
+		// If we hit B.Loop with the timer stopped, fail.
+		// b.loop.i |= loopPoisonTimer
+	}
+}
+
+// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
+// and deletes user-reported metrics.
+// It does not affect whether the timer is running.
+func (b *B) ResetTimer() {
+	if b.extra == nil {
+		// Allocate the extra map before reading memory stats.
+		// Pre-size it to make more allocation unlikely.
+		b.extra = make(map[string]float64, 16)
+	} else {
+		clear(b.extra)
+	}
+	if b.timerOn {
+		runtime.ReadMemStats(&memStats)
+		b.startAllocs = memStats.Mallocs
+		b.startBytes = memStats.TotalAlloc
+		b.start = highPrecisionTimeNow()
+	}
+	b.duration = 0
+	b.netAllocs = 0
+	b.netBytes = 0
+}
+
+// SetBytes records the number of bytes processed in a single operation.
+// If this is called, the benchmark will report ns/op and MB/s.
+func (b *B) SetBytes(n int64) { b.bytes = n }
+
+// ReportAllocs enables malloc statistics for this benchmark.
+// It is equivalent to setting -test.benchmem, but it only affects the
+// benchmark function that calls ReportAllocs.
+func (b *B) ReportAllocs() {
+	b.showAllocResult = true
+}
+
+// runN runs a single benchmark for the specified number of iterations.
+func (b *B) runN(n int) {
+	benchmarkLock.Lock()
+	defer benchmarkLock.Unlock()
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	defer func() {
+		b.runCleanup(normalPanic)
+		b.checkRaces()
+	}()
+	// Try to get a comparable environment for each run
+	// by clearing garbage from previous runs.
+	runtime.GC()
+	b.resetRaces()
+	b.N = n
+	b.loop.n = 0
+	b.loop.i = 0
+	b.loop.done = false
+	b.ctx = ctx
+	b.cancelCtx = cancelCtx
+
+	b.parallelism = 1
+	b.ResetTimer()
+	b.StartTimer()
+	b.benchFunc(b)
+	b.StopTimer()
+	b.previousN = n
+	b.previousDuration = b.duration
+
+	b.codspeedItersPerRound = append(b.codspeedItersPerRound, int64(n))
+
+	if b.loop.n > 0 && !b.loop.done && !b.failed {
+		b.Error("benchmark function returned without B.Loop() == false (break or return in loop?)")
+	}
+}
+
+// run1 runs the first iteration of benchFunc. It reports whether more
+// iterations of this benchmark should be run.
+func (b *B) run1() bool { + if bstate := b.bstate; bstate != nil { + // Extend maxLen, if needed. + if n := len(b.name) + bstate.extLen + 1; n > bstate.maxLen { + bstate.maxLen = n + 8 // Add additional slack to avoid too many jumps in size. + } + } + go func() { + // Signal that we're done whether we return normally + // or by FailNow's runtime.Goexit. + defer func() { + b.signal <- true + }() + + b.runN(1) + }() + <-b.signal + if b.failed { + fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), b.name, b.output) + return false + } + // Only print the output if we know we are not going to proceed. + // Otherwise it is printed in processBench. + b.mu.RLock() + finished := b.finished + b.mu.RUnlock() + if b.hasSub.Load() || finished { + tag := "BENCH" + if b.skipped { + tag = "SKIP" + } + if b.chatty != nil && (len(b.output) > 0 || finished) { + b.trimOutput() + fmt.Fprintf(b.w, "%s--- %s: %s\n%s", b.chatty.prefix(), tag, b.name, b.output) + } + return false + } + return true +} + +var labelsOnce sync.Once + +// run executes the benchmark in a separate goroutine, including all of its +// subbenchmarks. b must not have subbenchmarks. +func (b *B) run() { + labelsOnce.Do(func() { + fmt.Fprintf(b.w, "Running with CodSpeed instrumentation\n") + + fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS) + fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH) + if b.importPath != "" { + fmt.Fprintf(b.w, "pkg: %s\n", b.importPath) + } + if cpu := sysinfo.CPUName(); cpu != "" { + fmt.Fprintf(b.w, "cpu: %s\n", cpu) + } + }) + if b.bstate != nil { + // Running go test --test.bench + b.bstate.processBench(b) // Must call doBench. + } else { + // Running func Benchmark. + b.doBench() + } +} + +func (b *B) doBench() BenchmarkResult { + go b.launch() + <-b.signal + return b.result +} + +func predictN(goalns int64, prevIters int64, prevns int64, last int64) int { + if prevns == 0 { + // Round up to dodge divide by zero. See https://go.dev/issue/70709. + prevns = 1 + } + + // Order of operations matters. + // For very fast benchmarks, prevIters ~= prevns. + // If you divide first, you get 0 or 1, + // which can hide an order of magnitude in execution time. + // So multiply first, then divide. + n := goalns * prevIters / prevns + // Run more iterations than we think we'll need (1.2x). + n += n / 5 + // Don't grow too fast in case we had timing errors previously. + n = min(n, 100*last) + // Be sure to run at least one more than last time. + n = max(n, last+1) + // Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.) + n = min(n, 1e9) + return int(n) +} + +// launch launches the benchmark function. It gradually increases the number +// of benchmark iterations until the benchmark runs for the requested benchtime. +// launch is run by the doBench function as a separate goroutine. +// run1 must have been called on b. +func (b *B) launch() { + // Signal that we're done whether we return normally + // or by FailNow's runtime.Goexit. + defer func() { + b.signal <- true + }() + + // b.Loop does its own ramp-up logic so we just need to run it once. + // If b.loop.n is non zero, it means b.Loop has already run. + if b.loop.n == 0 { + // Run the benchmark for at least the specified amount of time. + if b.benchTime.n > 0 { + // We already ran a single iteration in run1. + // If -benchtime=1x was requested, use that result. + // See https://golang.org/issue/32051. 
+ if b.benchTime.n > 1 { + b.runN(b.benchTime.n) + } + } else { + warmupD := time.Millisecond * 500 + warmupN := int64(1) + for n := int64(1); !b.failed && b.duration < warmupD && n < 1e9; { + last := n + // Predict required iterations. + goalns := warmupD.Nanoseconds() + prevIters := int64(b.N) + n = int64(predictN(goalns, prevIters, b.duration.Nanoseconds(), last)) + b.runN(int(n)) + warmupN = n + } + + // Reset the fields from the warmup run + b.codspeedItersPerRound = make([]int64, 0) + b.codspeedTimePerRoundNs = make([]time.Duration, 0) + + // Final run: + benchD := time.Second * b.benchTime.d + benchN := predictN(benchD.Nanoseconds(), int64(b.N), b.duration.Nanoseconds(), warmupN) + rounds := 100 // TODO: Compute the rounds in a better way + roundN := benchN / int(rounds) + + for range rounds { + b.runN(int(roundN)) + } + } + } + b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.codspeedTimePerRoundNs, b.codspeedItersPerRound, b.extra} +} + +// Elapsed returns the measured elapsed time of the benchmark. +// The duration reported by Elapsed matches the one measured by +// [B.StartTimer], [B.StopTimer], and [B.ResetTimer]. +func (b *B) Elapsed() time.Duration { + d := b.duration + if b.timerOn { + d += highPrecisionTimeSince(b.start) + } + return d +} + +// ReportMetric adds "n unit" to the reported benchmark results. +// If the metric is per-iteration, the caller should divide by b.N, +// and by convention units should end in "/op". +// ReportMetric overrides any previously reported value for the same unit. +// ReportMetric panics if unit is the empty string or if unit contains +// any whitespace. +// If unit is a unit normally reported by the benchmark framework itself +// (such as "allocs/op"), ReportMetric will override that metric. +// Setting "ns/op" to 0 will suppress that built-in metric. +func (b *B) ReportMetric(n float64, unit string) { + if unit == "" { + panic("metric unit must not be empty") + } + if strings.IndexFunc(unit, unicode.IsSpace) >= 0 { + panic("metric unit must not contain whitespace") + } + b.extra[unit] = n +} + +func (b *B) stopOrScaleBLoop() bool { + t := b.Elapsed() + if t >= b.benchTime.d { + // Stop the timer so we don't count cleanup time + b.StopTimer() + // Commit iteration count + b.N = int(b.loop.n) + b.loop.done = true + return false + } + // Loop scaling + goalns := b.benchTime.d.Nanoseconds() + prevIters := int64(b.loop.n) + b.loop.n = uint64(predictN(goalns, prevIters, t.Nanoseconds(), prevIters)) + if b.loop.n&loopPoisonMask != 0 { + // The iteration count should never get this high, but if it did we'd be + // in big trouble. + panic("loop iteration target overflow") + } + b.loop.i++ + + b.StartTimer() + return true +} + +func (b *B) loopSlowPath() bool { + // Consistency checks + // if !b.timerOn { + // b.Fatal("B.Loop called with timer stopped") + // } + if b.loop.i&loopPoisonMask != 0 { + panic(fmt.Sprintf("unknown loop stop condition: %#x", b.loop.i)) + } + + if b.loop.n == 0 { + // If it's the first call to b.Loop() in the benchmark function. + // Allows more precise measurement of benchmark loop cost counts. + // Also initialize target to 1 to kick start loop scaling. + b.loop.n = 1 + // Within a b.Loop loop, we don't use b.N (to avoid confusion). 
+ b.N = 0 + b.loop.i++ + + b.codspeedItersPerRound = make([]int64, 0) + b.codspeedTimePerRoundNs = make([]time.Duration, 0) + + b.ResetTimer() + b.StartTimer() + return true + } + // Handles fixed iterations case + if b.benchTime.n > 0 { + if b.loop.n < uint64(b.benchTime.n) { + b.loop.n = uint64(b.benchTime.n) + b.loop.i++ + b.ResetTimer() + b.StartTimer() + return true + } + b.StopTimer() + // Commit iteration count + b.N = int(b.loop.n) + b.loop.done = true + return false + } + // Handles fixed time case + return b.stopOrScaleBLoop() +} + +// Loop returns true as long as the benchmark should continue running. +// +// A typical benchmark is structured like: +// +// func Benchmark(b *testing.B) { +// ... setup ... +// for b.Loop() { +// ... code to measure ... +// } +// ... cleanup ... +// } +// +// Loop resets the benchmark timer the first time it is called in a benchmark, +// so any setup performed prior to starting the benchmark loop does not count +// toward the benchmark measurement. Likewise, when it returns false, it stops +// the timer so cleanup code is not measured. +// +// The compiler never optimizes away calls to functions within the body of a +// "for b.Loop() { ... }" loop. This prevents surprises that can otherwise occur +// if the compiler determines that the result of a benchmarked function is +// unused. The loop must be written in exactly this form, and this only applies +// to calls syntactically between the curly braces of the loop. Optimizations +// are performed as usual in any functions called by the loop. +// +// After Loop returns false, b.N contains the total number of iterations that +// ran, so the benchmark may use b.N to compute other average metrics. +// +// Prior to the introduction of Loop, benchmarks were expected to contain an +// explicit loop from 0 to b.N. Benchmarks should either use Loop or contain a +// loop to b.N, but not both. Loop offers more automatic management of the +// benchmark timer, and runs each benchmark function only once per measurement, +// whereas b.N-based benchmarks must run the benchmark function (and any +// associated setup and cleanup) several times. +func (b *B) Loop() bool { + b.StopTimer() + // This is written such that the fast path is as fast as possible and can be + // inlined. + // + // There are three cases where we'll fall out of the fast path: + // + // - On the first call, both i and n are 0. + // + // - If the loop reaches the n'th iteration, then i == n and we need + // to figure out the new target iteration count or if we're done. + // + // - If the timer is stopped, it poisons the top bit of i so the slow + // path can do consistency checks and fail. + if b.loop.i < b.loop.n { + b.loop.i++ + b.StartTimer() + return true + } + return b.loopSlowPath() +} + +// The loopPoison constants can be OR'd into B.loop.i to cause it to fall back +// to the slow path. +const ( + loopPoisonTimer = uint64(1 << (63 - iota)) + // If necessary, add more poison bits here. + + // loopPoisonMask is the set of all loop poison bits. (iota-1) is the index + // of the bit we just set, from which we recreate that bit mask. We subtract + // 1 to set all of the bits below that bit, then complement the result to + // get the mask. Sorry, not sorry. + loopPoisonMask = ^uint64((1 << (63 - (iota - 1))) - 1) +) + +// BenchmarkResult contains the results of a benchmark run. +type BenchmarkResult struct { + N int // The number of iterations. + T time.Duration // The total time taken. + Bytes int64 // Bytes processed in one iteration. 
+ MemAllocs uint64 // The total number of memory allocations. + MemBytes uint64 // The total number of bytes allocated. + + CodspeedTimePerRoundNs []time.Duration + CodspeedItersPerRound []int64 + + // Extra records additional metrics reported by ReportMetric. + Extra map[string]float64 +} + +// NsPerOp returns the "ns/op" metric. +func (r BenchmarkResult) NsPerOp() int64 { + if v, ok := r.Extra["ns/op"]; ok { + return int64(v) + } + if r.N <= 0 { + return 0 + } + return r.T.Nanoseconds() / int64(r.N) +} + +// mbPerSec returns the "MB/s" metric. +func (r BenchmarkResult) mbPerSec() float64 { + if v, ok := r.Extra["MB/s"]; ok { + return v + } + if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 { + return 0 + } + return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds() +} + +// AllocsPerOp returns the "allocs/op" metric, +// which is calculated as r.MemAllocs / r.N. +func (r BenchmarkResult) AllocsPerOp() int64 { + if v, ok := r.Extra["allocs/op"]; ok { + return int64(v) + } + if r.N <= 0 { + return 0 + } + return int64(r.MemAllocs) / int64(r.N) +} + +// AllocedBytesPerOp returns the "B/op" metric, +// which is calculated as r.MemBytes / r.N. +func (r BenchmarkResult) AllocedBytesPerOp() int64 { + if v, ok := r.Extra["B/op"]; ok { + return int64(v) + } + if r.N <= 0 { + return 0 + } + return int64(r.MemBytes) / int64(r.N) +} + +// String returns a summary of the benchmark results. +// It follows the benchmark result line format from +// https://golang.org/design/14313-benchmark-format, not including the +// benchmark name. +// Extra metrics override built-in metrics of the same name. +// String does not include allocs/op or B/op, since those are reported +// by [BenchmarkResult.MemString]. +func (r BenchmarkResult) String() string { + buf := new(strings.Builder) + fmt.Fprintf(buf, "%8d", r.N) + + // Get ns/op as a float. + ns, ok := r.Extra["ns/op"] + if !ok { + ns = float64(r.T.Nanoseconds()) / float64(r.N) + } + if ns != 0 { + buf.WriteByte('\t') + prettyPrint(buf, ns, "ns/op") + } + + if mbs := r.mbPerSec(); mbs != 0 { + fmt.Fprintf(buf, "\t%7.2f MB/s", mbs) + } + + // Print extra metrics that aren't represented in the standard + // metrics. + var extraKeys []string + for k := range r.Extra { + switch k { + case "ns/op", "MB/s", "B/op", "allocs/op": + // Built-in metrics reported elsewhere. + continue + } + extraKeys = append(extraKeys, k) + } + slices.Sort(extraKeys) + for _, k := range extraKeys { + buf.WriteByte('\t') + prettyPrint(buf, r.Extra[k], k) + } + return buf.String() +} + +func prettyPrint(w io.Writer, x float64, unit string) { + // Print all numbers with 10 places before the decimal point + // and small numbers with four sig figs. Field widths are + // chosen to fit the whole part in 10 places while aligning + // the decimal point of all fractional formats. + var format string + switch y := math.Abs(x); { + case y == 0 || y >= 999.95: + format = "%10.0f %s" + case y >= 99.995: + format = "%12.1f %s" + case y >= 9.9995: + format = "%13.2f %s" + case y >= 0.99995: + format = "%14.3f %s" + case y >= 0.099995: + format = "%15.4f %s" + case y >= 0.0099995: + format = "%16.5f %s" + case y >= 0.00099995: + format = "%17.6f %s" + default: + format = "%18.7f %s" + } + fmt.Fprintf(w, format, x, unit) +} + +// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'. 
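+//
+// For example, a result with 1024 B/op and 3 allocs/op renders as
+// (spacing per the %8d verbs in the format string below):
+//
+//	    1024 B/op	       3 allocs/op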
+func (r BenchmarkResult) MemString() string { + return fmt.Sprintf("%8d B/op\t%8d allocs/op", + r.AllocedBytesPerOp(), r.AllocsPerOp()) +} + +// benchmarkName returns full name of benchmark including procs suffix. +func benchmarkName(name string, n int) string { + if n != 1 { + return fmt.Sprintf("%s-%d", name, n) + } + return name +} + +type benchState struct { + match *matcher + + maxLen int // The largest recorded benchmark name. + extLen int // Maximum extension length. +} + +// RunBenchmarks is an internal function but exported because it is cross-package; +// it is part of the implementation of the "go test" command. +func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) { + runBenchmarks("", matchString, benchmarks) +} + +func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool { + // If no flag was specified, don't run benchmarks. + if len(*matchBenchmarks) == 0 { + return true + } + // Collect matching benchmarks and determine longest name. + maxprocs := 1 + for _, procs := range cpuList { + if procs > maxprocs { + maxprocs = procs + } + } + bstate := &benchState{ + match: newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip), + extLen: len(benchmarkName("", maxprocs)), + } + var bs []InternalBenchmark + for _, Benchmark := range benchmarks { + if _, matched, _ := bstate.match.fullName(nil, Benchmark.Name); matched { + bs = append(bs, Benchmark) + benchName := benchmarkName(Benchmark.Name, maxprocs) + if l := len(benchName) + bstate.extLen + 1; l > bstate.maxLen { + bstate.maxLen = l + } + } + } + main := &B{ + common: common{ + name: "Main", + w: os.Stdout, + bench: true, + }, + importPath: importPath, + benchFunc: func(b *B) { + for _, Benchmark := range bs { + b.Run(Benchmark.Name, Benchmark.F) + } + }, + benchTime: benchTime, + bstate: bstate, + } + if Verbose() { + main.chatty = newChattyPrinter(main.w) + } + main.runN(1) + return !main.failed +} + +// processBench runs bench b for the configured CPU counts and prints the results. +func (s *benchState) processBench(b *B) { + for i, procs := range cpuList { + for j := uint(0); j < *count; j++ { + runtime.GOMAXPROCS(procs) + benchName := benchmarkName(b.name, procs) + + // If it's chatty, we've already printed this information. + if b.chatty == nil { + fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName) + } + // Recompute the running time for all but the first iteration. + if i > 0 || j > 0 { + b = &B{ + common: common{ + signal: make(chan bool), + name: b.name, + w: b.w, + chatty: b.chatty, + bench: true, + }, + benchFunc: b.benchFunc, + benchTime: b.benchTime, + } + b.run1() + } + r := b.doBench() + if b.failed { + // The output could be very long here, but probably isn't. + // We print it all, regardless, because we don't want to trim the reason + // the benchmark failed. 
+				fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), benchName, b.output)
+				continue
+			}
+			results := r.String()
+
+			// ############################################################################################
+			// START CODSPEED
+			type RawResults struct {
+				BenchmarkName          string          `json:"benchmark_name"`
+				Pid                    int             `json:"pid"`
+				CodspeedTimePerRoundNs []time.Duration `json:"codspeed_time_per_round_ns"`
+				CodspeedItersPerRound  []int64         `json:"codspeed_iters_per_round"`
+			}
+
+			// Build custom bench name with :: separator
+			var nameParts []string
+			current := &b.common
+			for current.parent != nil {
+				// Extract the sub-benchmark part by removing parent prefix
+				parentName := current.parent.name
+				if strings.HasPrefix(current.name, parentName+"/") {
+					subName := strings.TrimPrefix(current.name, parentName+"/")
+					nameParts = append([]string{subName}, nameParts...)
+				} else {
+					nameParts = append([]string{current.name}, nameParts...)
+				}
+
+				if current.parent.name == "Main" {
+					break
+				}
+				current = current.parent
+			}
+			customBenchName := strings.Join(nameParts, "::")
+
+			rawResults := RawResults{
+				BenchmarkName:          customBenchName,
+				Pid:                    os.Getpid(),
+				CodspeedTimePerRoundNs: r.CodspeedTimePerRoundNs,
+				CodspeedItersPerRound:  r.CodspeedItersPerRound,
+			}
+
+			codspeedProfileFolder := os.Getenv("CODSPEED_PROFILE_FOLDER")
+			if codspeedProfileFolder == "" {
+				panic("CODSPEED_PROFILE_FOLDER environment variable is not set")
+			}
+			if err := os.MkdirAll(filepath.Join(codspeedProfileFolder, "raw_results"), 0755); err != nil {
+				fmt.Fprintf(os.Stderr, "failed to create raw results directory: %v\n", err)
+				continue
+			}
+			// Generate random filename to avoid any overwrites
+			randomBytes := make([]byte, 16)
+			if _, err := rand.Read(randomBytes); err != nil {
+				fmt.Fprintf(os.Stderr, "failed to generate random filename: %v\n", err)
+				continue
+			}
+			rawResultsFile := filepath.Join(codspeedProfileFolder, "raw_results", fmt.Sprintf("%s.json", hex.EncodeToString(randomBytes)))
+			file, err := os.Create(rawResultsFile)
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "failed to create raw results file: %v\n", err)
+				continue
+			}
+			output, err := json.MarshalIndent(rawResults, "", "  ")
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "failed to marshal raw results: %v\n", err)
+				file.Close()
+				continue
+			}
+			// FIXME: Don't overwrite the file if it already exists
+			if _, err := file.Write(output); err != nil {
+				fmt.Fprintf(os.Stderr, "failed to write raw results: %v\n", err)
+				file.Close()
+				continue
+			}
+			// Close now rather than defer: this code runs inside the per-benchmark
+			// loop, and a deferred Close would not run until processBench returns.
+			file.Close()
+			// END CODSPEED
+			// ############################################################################################
+
+			if b.chatty != nil {
+				fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName)
+			}
+			if *benchmarkMemory || b.showAllocResult {
+				results += "\t" + r.MemString()
+			}
+			fmt.Fprintln(b.w, results)
+			// Unlike with tests, we ignore the -chatty flag and always print output for
+			// benchmarks since the output generation time will skew the results.
+			if len(b.output) > 0 {
+				b.trimOutput()
+				fmt.Fprintf(b.w, "%s--- BENCH: %s\n%s", b.chatty.prefix(), benchName, b.output)
+			}
+			if p := runtime.GOMAXPROCS(-1); p != procs {
+				fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
+			}
+			if b.chatty != nil && b.chatty.json {
+				b.chatty.Updatef("", "=== NAME %s\n", "")
+			}
+		}
+	}
+}
+
+// If hideStdoutForTesting is true, Run does not print the benchName.
+// This avoids a spurious print during 'go test' on package testing itself, +// which invokes b.Run in its own tests (see sub_test.go). +var hideStdoutForTesting = false + +// Run benchmarks f as a subbenchmark with the given name. It reports +// whether there were any failures. +// +// A subbenchmark is like any other benchmark. A benchmark that calls Run at +// least once will not be measured itself and will be called once with N=1. +func (b *B) Run(name string, f func(b *B)) bool { + // Since b has subbenchmarks, we will no longer run it as a benchmark itself. + // Release the lock and acquire it on exit to ensure locks stay paired. + b.hasSub.Store(true) + benchmarkLock.Unlock() + defer benchmarkLock.Lock() + + benchName, ok, partial := b.name, true, false + if b.bstate != nil { + benchName, ok, partial = b.bstate.match.fullName(&b.common, name) + } + if !ok { + return true + } + var pc [maxStackLen]uintptr + n := runtime.Callers(2, pc[:]) + sub := &B{ + common: common{ + signal: make(chan bool), + name: benchName, + parent: &b.common, + level: b.level + 1, + creator: pc[:n], + w: b.w, + chatty: b.chatty, + bench: true, + }, + importPath: b.importPath, + benchFunc: f, + benchTime: b.benchTime, + bstate: b.bstate, + } + if partial { + // Partial name match, like -bench=X/Y matching BenchmarkX. + // Only process sub-benchmarks, if any. + sub.hasSub.Store(true) + } + + if b.chatty != nil { + labelsOnce.Do(func() { + fmt.Printf("goos: %s\n", runtime.GOOS) + fmt.Printf("goarch: %s\n", runtime.GOARCH) + if b.importPath != "" { + fmt.Printf("pkg: %s\n", b.importPath) + } + if cpu := sysinfo.CPUName(); cpu != "" { + fmt.Printf("cpu: %s\n", cpu) + } + }) + + if !hideStdoutForTesting { + if b.chatty.json { + b.chatty.Updatef(benchName, "=== RUN %s\n", benchName) + } + fmt.Println(benchName) + } + } + + if sub.run1() { + sub.run() + } + b.add(sub.result) + return !sub.failed +} + +// add simulates running benchmarks in sequence in a single iteration. It is +// used to give some meaningful results in case func Benchmark is used in +// combination with Run. +func (b *B) add(other BenchmarkResult) { + r := &b.result + // The aggregated BenchmarkResults resemble running all subbenchmarks as + // in sequence in a single benchmark. + r.N = 1 + r.T += time.Duration(other.NsPerOp()) + if other.Bytes == 0 { + // Summing Bytes is meaningless in aggregate if not all subbenchmarks + // set it. + b.missingBytes = true + r.Bytes = 0 + } + if !b.missingBytes { + r.Bytes += other.Bytes + } + r.MemAllocs += uint64(other.AllocsPerOp()) + r.MemBytes += uint64(other.AllocedBytesPerOp()) +} + +// trimOutput shortens the output from a benchmark, which can be very long. +func (b *B) trimOutput() { + // The output is likely to appear multiple times because the benchmark + // is run multiple times, but at least it will be seen. This is not a big deal + // because benchmarks rarely print, but just in case, we trim it if it's too long. + const maxNewlines = 10 + for nlCount, j := 0, 0; j < len(b.output); j++ { + if b.output[j] == '\n' { + nlCount++ + if nlCount >= maxNewlines { + b.output = append(b.output[:j], "\n\t... [output truncated]\n"...) + break + } + } + } +} + +// A PB is used by RunParallel for running parallel benchmarks. 
+type PB struct { + globalN *atomic.Uint64 // shared between all worker goroutines iteration counter + grain uint64 // acquire that many iterations from globalN at once + cache uint64 // local cache of acquired iterations + bN uint64 // total number of iterations to execute (b.N) +} + +// Next reports whether there are more iterations to execute. +func (pb *PB) Next() bool { + if pb.cache == 0 { + n := pb.globalN.Add(pb.grain) + if n <= pb.bN { + pb.cache = pb.grain + } else if n < pb.bN+pb.grain { + pb.cache = pb.bN + pb.grain - n + } else { + return false + } + } + pb.cache-- + return true +} + +// RunParallel runs a benchmark in parallel. +// It creates multiple goroutines and distributes b.N iterations among them. +// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for +// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel. +// RunParallel is usually used with the go test -cpu flag. +// +// The body function will be run in each goroutine. It should set up any +// goroutine-local state and then iterate until pb.Next returns false. +// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions, +// because they have global effect. It should also not call [B.Run]. +// +// RunParallel reports ns/op values as wall time for the benchmark as a whole, +// not the sum of wall time or CPU time over each parallel goroutine. +func (b *B) RunParallel(body func(*PB)) { + if b.N == 0 { + return // Nothing to do when probing. + } + // Calculate grain size as number of iterations that take ~100µs. + // 100µs is enough to amortize the overhead and provide sufficient + // dynamic load balancing. + grain := uint64(0) + if b.previousN > 0 && b.previousDuration > 0 { + grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration) + } + if grain < 1 { + grain = 1 + } + // We expect the inner loop and function call to take at least 10ns, + // so do not do more than 100µs/10ns=1e4 iterations. + if grain > 1e4 { + grain = 1e4 + } + + var n atomic.Uint64 + numProcs := b.parallelism * runtime.GOMAXPROCS(0) + var wg sync.WaitGroup + wg.Add(numProcs) + for p := 0; p < numProcs; p++ { + go func() { + defer wg.Done() + pb := &PB{ + globalN: &n, + grain: grain, + bN: uint64(b.N), + } + body(pb) + }() + } + wg.Wait() + if n.Load() <= uint64(b.N) && !b.Failed() { + b.Fatal("RunParallel: body exited without pb.Next() == false") + } +} + +// SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS. +// There is usually no need to call SetParallelism for CPU-bound benchmarks. +// If p is less than 1, this call will have no effect. +func (b *B) SetParallelism(p int) { + if p >= 1 { + b.parallelism = p + } +} + +// Benchmark benchmarks a single function. It is useful for creating +// custom benchmarks that do not use the "go test" command. +// +// If f depends on testing flags, then [Init] must be used to register +// those flags before calling Benchmark and before calling [flag.Parse]. +// +// If f calls Run, the result will be an estimate of running all its +// subbenchmarks that don't call Run in sequence in a single benchmark. 
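+//
+// A short sketch of standalone use, where work stands in for the code being
+// measured:
+//
+//	res := testing.Benchmark(func(b *testing.B) {
+//		for i := 0; i < b.N; i++ {
+//			work()
+//		}
+//	})
+//	fmt.Printf("%d ns/op\n", res.NsPerOp())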
+func Benchmark(f func(b *B)) BenchmarkResult { + b := &B{ + common: common{ + signal: make(chan bool), + w: discard{}, + }, + benchFunc: f, + benchTime: benchTime, + } + if b.run1() { + b.run() + } + return b.result +} + +type discard struct{} + +func (discard) Write(b []byte) (n int, err error) { return len(b), nil } diff --git a/testing/testing/benchmark_test.go b/testing/testing/benchmark_test.go new file mode 100644 index 0000000..e2dd24c --- /dev/null +++ b/testing/testing/benchmark_test.go @@ -0,0 +1,244 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "bytes" + "cmp" + "context" + "errors" + "runtime" + "slices" + "strings" + "sync/atomic" + "testing" + "text/template" + "time" +) + +var prettyPrintTests = []struct { + v float64 + expected string +}{ + {0, " 0 x"}, + {1234.1, " 1234 x"}, + {-1234.1, " -1234 x"}, + {999.950001, " 1000 x"}, + {999.949999, " 999.9 x"}, + {99.9950001, " 100.0 x"}, + {99.9949999, " 99.99 x"}, + {-99.9949999, " -99.99 x"}, + {0.000999950001, " 0.001000 x"}, + {0.000999949999, " 0.0009999 x"}, // smallest case + {0.0000999949999, " 0.0001000 x"}, +} + +func TestPrettyPrint(t *testing.T) { + for _, tt := range prettyPrintTests { + buf := new(strings.Builder) + testing.PrettyPrint(buf, tt.v, "x") + if tt.expected != buf.String() { + t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String()) + } + } +} + +func TestResultString(t *testing.T) { + // Test fractional ns/op handling + r := testing.BenchmarkResult{ + N: 100, + T: 240 * time.Nanosecond, + } + if r.NsPerOp() != 2 { + t.Errorf("NsPerOp: expected 2, actual %v", r.NsPerOp()) + } + if want, got := " 100\t 2.400 ns/op", r.String(); want != got { + t.Errorf("String: expected %q, actual %q", want, got) + } + + // Test sub-1 ns/op (issue #31005) + r.T = 40 * time.Nanosecond + if want, got := " 100\t 0.4000 ns/op", r.String(); want != got { + t.Errorf("String: expected %q, actual %q", want, got) + } + + // Test 0 ns/op + r.T = 0 + if want, got := " 100", r.String(); want != got { + t.Errorf("String: expected %q, actual %q", want, got) + } +} + +func TestRunParallel(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + testing.Benchmark(func(b *testing.B) { + procs := uint32(0) + iters := uint64(0) + b.SetParallelism(3) + b.RunParallel(func(pb *testing.PB) { + atomic.AddUint32(&procs, 1) + for pb.Next() { + atomic.AddUint64(&iters, 1) + } + }) + if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want { + t.Errorf("got %v procs, want %v", procs, want) + } + if iters != uint64(b.N) { + t.Errorf("got %v iters, want %v", iters, b.N) + } + }) +} + +func TestRunParallelFail(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + // The function must be able to log/abort + // w/o crashing/deadlocking the whole benchmark. 
+ b.Log("log") + b.Error("error") + }) + }) +} + +func TestRunParallelFatal(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if b.N > 1 { + b.Fatal("error") + } + } + }) + }) +} + +func TestRunParallelSkipNow(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if b.N > 1 { + b.SkipNow() + } + } + }) + }) +} + +func TestBenchmarkContext(t *testing.T) { + testing.Benchmark(func(b *testing.B) { + ctx := b.Context() + if err := ctx.Err(); err != nil { + b.Fatalf("expected non-canceled context, got %v", err) + } + + var innerCtx context.Context + b.Run("inner", func(b *testing.B) { + innerCtx = b.Context() + if err := innerCtx.Err(); err != nil { + b.Fatalf("expected inner benchmark to not inherit canceled context, got %v", err) + } + }) + b.Run("inner2", func(b *testing.B) { + if !errors.Is(innerCtx.Err(), context.Canceled) { + t.Fatal("expected context of sibling benchmark to be canceled after its test function finished") + } + }) + + t.Cleanup(func() { + if !errors.Is(ctx.Err(), context.Canceled) { + t.Fatal("expected context canceled before cleanup") + } + }) + }) +} + +func ExampleB_RunParallel() { + // Parallel benchmark for text/template.Template.Execute on a single object. + testing.Benchmark(func(b *testing.B) { + templ := template.Must(template.New("test").Parse("Hello, {{.}}!")) + // RunParallel will create GOMAXPROCS goroutines + // and distribute work among them. + b.RunParallel(func(pb *testing.PB) { + // Each goroutine has its own bytes.Buffer. + var buf bytes.Buffer + for pb.Next() { + // The loop body is executed b.N times total across all goroutines. + buf.Reset() + templ.Execute(&buf, "World") + } + }) + }) +} + +func TestReportMetric(t *testing.T) { + res := testing.Benchmark(func(b *testing.B) { + b.ReportMetric(12345, "ns/op") + b.ReportMetric(0.2, "frobs/op") + }) + // Test built-in overriding. + if res.NsPerOp() != 12345 { + t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp()) + } + // Test stringing. + res.N = 1 // Make the output stable + want := " 1\t 12345 ns/op\t 0.2000 frobs/op" + if want != res.String() { + t.Errorf("expected %q, actual %q", want, res.String()) + } +} + +func ExampleB_ReportMetric() { + // This reports a custom benchmark metric relevant to a + // specific algorithm (in this case, sorting). + testing.Benchmark(func(b *testing.B) { + var compares int64 + for b.Loop() { + s := []int{5, 4, 3, 2, 1} + slices.SortFunc(s, func(a, b int) int { + compares++ + return cmp.Compare(a, b) + }) + } + // This metric is per-operation, so divide by b.N and + // report it as a "/op" unit. + b.ReportMetric(float64(compares)/float64(b.N), "compares/op") + // This metric is per-time, so divide by b.Elapsed and + // report it as a "/ns" unit. + b.ReportMetric(float64(compares)/float64(b.Elapsed().Nanoseconds()), "compares/ns") + }) +} + +func ExampleB_ReportMetric_parallel() { + // This reports a custom benchmark metric relevant to a + // specific algorithm (in this case, sorting) in parallel. + testing.Benchmark(func(b *testing.B) { + var compares atomic.Int64 + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + s := []int{5, 4, 3, 2, 1} + slices.SortFunc(s, func(a, b int) int { + // Because RunParallel runs the function many + // times in parallel, we must increment the + // counter atomically to avoid racing writes. 
+ compares.Add(1) + return cmp.Compare(a, b) + }) + } + }) + + // NOTE: Report each metric once, after all of the parallel + // calls have completed. + + // This metric is per-operation, so divide by b.N and + // report it as a "/op" unit. + b.ReportMetric(float64(compares.Load())/float64(b.N), "compares/op") + // This metric is per-time, so divide by b.Elapsed and + // report it as a "/ns" unit. + b.ReportMetric(float64(compares.Load())/float64(b.Elapsed().Nanoseconds()), "compares/ns") + }) +} diff --git a/testing/testing/cover.go b/testing/testing/cover.go new file mode 100644 index 0000000..ce00b26 --- /dev/null +++ b/testing/testing/cover.go @@ -0,0 +1,125 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for test coverage. + +package testing + +import ( + "fmt" + "os" + "sync/atomic" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goexperiment" +) + +// CoverBlock records the coverage data for a single basic block. +// The fields are 1-indexed, as in an editor: The opening line of +// the file is number 1, for example. Columns are measured +// in bytes. +// NOTE: This struct is internal to the testing infrastructure and may change. +// It is not covered (yet) by the Go 1 compatibility guidelines. +type CoverBlock struct { + Line0 uint32 // Line number for block start. + Col0 uint16 // Column number for block start. + Line1 uint32 // Line number for block end. + Col1 uint16 // Column number for block end. + Stmts uint16 // Number of statements included in this block. +} + +var cover Cover + +// Cover records information about test coverage checking. +// NOTE: This struct is internal to the testing infrastructure and may change. +// It is not covered (yet) by the Go 1 compatibility guidelines. +type Cover struct { + Mode string + Counters map[string][]uint32 + Blocks map[string][]CoverBlock + CoveredPackages string +} + +// Coverage reports the current code coverage as a fraction in the range [0, 1]. +// If coverage is not enabled, Coverage returns 0. +// +// When running a large set of sequential test cases, checking Coverage after each one +// can be useful for identifying which test cases exercise new code paths. +// It is not a replacement for the reports generated by 'go test -cover' and +// 'go tool cover'. +func Coverage() float64 { + if goexperiment.CoverageRedesign { + return coverage2() + } + var n, d int64 + for _, counters := range cover.Counters { + for i := range counters { + if atomic.LoadUint32(&counters[i]) > 0 { + n++ + } + d++ + } + } + if d == 0 { + return 0 + } + return float64(n) / float64(d) +} + +// RegisterCover records the coverage data accumulators for the tests. +// NOTE: This function is internal to the testing infrastructure and may change. +// It is not covered (yet) by the Go 1 compatibility guidelines. +func RegisterCover(c Cover) { + cover = c +} + +// mustBeNil checks the error and, if present, reports it and exits. +func mustBeNil(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } +} + +// coverReport reports the coverage percentage and writes a coverage profile if requested. 
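+// In the legacy (pre-CoverageRedesign) path below, the profile begins with a
+// "mode:" header followed by one line per block in the form
+// "file:startLine.startCol,endLine.endCol numStmts count", for example
+// (values illustrative):
+//
+//	mode: set
+//	example.go:3.20,5.2 2 1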
+func coverReport() { + if goexperiment.CoverageRedesign { + coverReport2() + return + } + var f *os.File + var err error + if *coverProfile != "" { + f, err = os.Create(toOutputDir(*coverProfile)) + mustBeNil(err) + fmt.Fprintf(f, "mode: %s\n", cover.Mode) + defer func() { mustBeNil(f.Close()) }() + } + + var active, total int64 + var count uint32 + for name, counts := range cover.Counters { + blocks := cover.Blocks[name] + for i := range counts { + stmts := int64(blocks[i].Stmts) + total += stmts + count = atomic.LoadUint32(&counts[i]) // For -mode=atomic. + if count > 0 { + active += stmts + } + if f != nil { + _, err := fmt.Fprintf(f, "%s:%d.%d,%d.%d %d %d\n", name, + blocks[i].Line0, blocks[i].Col0, + blocks[i].Line1, blocks[i].Col1, + stmts, + count) + mustBeNil(err) + } + } + } + if total == 0 { + fmt.Println("coverage: [no statements]") + return + } + fmt.Printf("coverage: %.1f%% of statements%s\n", 100*float64(active)/float64(total), cover.CoveredPackages) +} diff --git a/testing/testing/example.go b/testing/testing/example.go new file mode 100644 index 0000000..c343ae2 --- /dev/null +++ b/testing/testing/example.go @@ -0,0 +1,102 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "fmt" + "runtime" + "slices" + "strings" + "time" +) + +type InternalExample struct { + Name string + F func() + Output string + Unordered bool +} + +// RunExamples is an internal function but exported because it is cross-package; +// it is part of the implementation of the "go test" command. +func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) { + _, ok = runExamples(matchString, examples) + return ok +} + +func runExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ran, ok bool) { + ok = true + + m := newMatcher(matchString, *match, "-test.run", *skip) + + var eg InternalExample + for _, eg = range examples { + _, matched, _ := m.fullName(nil, eg.Name) + if !matched { + continue + } + ran = true + if !runExample(eg) { + ok = false + } + } + + return ran, ok +} + +func sortLines(output string) string { + lines := strings.Split(output, "\n") + slices.Sort(lines) + return strings.Join(lines, "\n") +} + +// processRunResult computes a summary and status of the result of running an example test. +// stdout is the captured output from stdout of the test. +// recovered is the result of invoking recover after running the test, in case it panicked. +// +// If stdout doesn't match the expected output or if recovered is non-nil, it'll print the cause of failure to stdout. +// If the test is chatty/verbose, it'll print a success message to stdout. +// If recovered is non-nil, it'll panic with that value. 
+// If the test panicked with nil, or invoked runtime.Goexit, it'll be +// made to fail and panic with errNilPanicOrGoexit +func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, finished bool, recovered any) (passed bool) { + passed = true + dstr := fmtDuration(timeSpent) + var fail string + got := strings.TrimSpace(stdout) + want := strings.TrimSpace(eg.Output) + if runtime.GOOS == "windows" { + got = strings.ReplaceAll(got, "\r\n", "\n") + want = strings.ReplaceAll(want, "\r\n", "\n") + } + if eg.Unordered { + if sortLines(got) != sortLines(want) && recovered == nil { + fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", stdout, eg.Output) + } + } else { + if got != want && recovered == nil { + fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want) + } + } + if fail != "" || !finished || recovered != nil { + fmt.Printf("%s--- FAIL: %s (%s)\n%s", chatty.prefix(), eg.Name, dstr, fail) + passed = false + } else if chatty.on { + fmt.Printf("%s--- PASS: %s (%s)\n", chatty.prefix(), eg.Name, dstr) + } + + if chatty.on && chatty.json { + fmt.Printf("%s=== NAME %s\n", chatty.prefix(), "") + } + + if recovered != nil { + // Propagate the previously recovered result, by panicking. + panic(recovered) + } else if !finished { + panic(errNilPanicOrGoexit) + } + + return +} diff --git a/testing/testing/example_loop_test.go b/testing/testing/example_loop_test.go new file mode 100644 index 0000000..eff8bab --- /dev/null +++ b/testing/testing/example_loop_test.go @@ -0,0 +1,48 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "math/rand/v2" + "testing" +) + +// ExBenchmark shows how to use b.Loop in a benchmark. +// +// (If this were a real benchmark, not an example, this would be named +// BenchmarkSomething.) +func ExBenchmark(b *testing.B) { + // Generate a large random slice to use as an input. + // Since this is done before the first call to b.Loop(), + // it doesn't count toward the benchmark time. + input := make([]int, 128<<10) + for i := range input { + input[i] = rand.Int() + } + + // Perform the benchmark. + for b.Loop() { + // Normally, the compiler would be allowed to optimize away the call + // to sum because it has no side effects and the result isn't used. + // However, inside a b.Loop loop, the compiler ensures function calls + // aren't optimized away. + sum(input) + } + + // Outside the loop, the timer is stopped, so we could perform + // cleanup if necessary without affecting the result. +} + +func sum(data []int) int { + total := 0 + for _, value := range data { + total += value + } + return total +} + +func ExampleB_Loop() { + testing.Benchmark(ExBenchmark) +} diff --git a/testing/testing/export_test.go b/testing/testing/export_test.go new file mode 100644 index 0000000..a2dddc7 --- /dev/null +++ b/testing/testing/export_test.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing + +var PrettyPrint = prettyPrint + +type HighPrecisionTime = highPrecisionTime + +var HighPrecisionTimeNow = highPrecisionTimeNow + +const ParallelConflict = parallelConflict diff --git a/testing/testing/flag_test.go b/testing/testing/flag_test.go new file mode 100644 index 0000000..e47cac7 --- /dev/null +++ b/testing/testing/flag_test.go @@ -0,0 +1,86 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "flag" + "os" + "os/exec" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +var testFlagArg = flag.String("test_flag_arg", "", "TestFlag: passing -v option") + +const flagTestEnv = "GO_WANT_FLAG_HELPER_PROCESS" + +func TestFlag(t *testing.T) { + if os.Getenv(flagTestEnv) == "1" { + testFlagHelper(t) + return + } + + testenv.MustHaveExec(t) + + for _, flag := range []string{"", "-test.v", "-test.v=test2json"} { + flag := flag + t.Run(flag, func(t *testing.T) { + t.Parallel() + cmd := exec.Command(testenv.Executable(t), "-test.run=^TestFlag$", "-test_flag_arg="+flag) + if flag != "" { + cmd.Args = append(cmd.Args, flag) + } + cmd.Env = append(cmd.Environ(), flagTestEnv+"=1") + b, err := cmd.CombinedOutput() + if len(b) > 0 { + // When we set -test.v=test2json, we need to escape the ^V control + // character used for JSON framing so that the JSON parser doesn't + // misinterpret the subprocess output as output from the parent test. + t.Logf("%q", b) + } + if err != nil { + t.Error(err) + } + }) + } +} + +// testFlagHelper is called by the TestFlagHelper subprocess. +func testFlagHelper(t *testing.T) { + f := flag.Lookup("test.v") + if f == nil { + t.Fatal(`flag.Lookup("test.v") failed`) + } + + bf, ok := f.Value.(interface{ IsBoolFlag() bool }) + if !ok { + t.Errorf("test.v flag (type %T) does not have IsBoolFlag method", f) + } else if !bf.IsBoolFlag() { + t.Error("test.v IsBoolFlag() returned false") + } + + gf, ok := f.Value.(flag.Getter) + if !ok { + t.Fatalf("test.v flag (type %T) does not have Get method", f) + } + v := gf.Get() + + var want any + switch *testFlagArg { + case "": + want = false + case "-test.v": + want = true + case "-test.v=test2json": + want = "test2json" + default: + t.Fatalf("unexpected test_flag_arg %q", *testFlagArg) + } + + if v != want { + t.Errorf("test.v is %v want %v", v, want) + } +} diff --git a/testing/testing/fstest/mapfs.go b/testing/testing/fstest/mapfs.go new file mode 100644 index 0000000..5e3720b --- /dev/null +++ b/testing/testing/fstest/mapfs.go @@ -0,0 +1,244 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fstest + +import ( + "io" + "io/fs" + "path" + "slices" + "strings" + "time" +) + +// A MapFS is a simple in-memory file system for use in tests, +// represented as a map from path names (arguments to Open) +// to information about the files or directories they represent. +// +// The map need not include parent directories for files contained +// in the map; those will be synthesized if needed. +// But a directory can still be included by setting the [MapFile.Mode]'s [fs.ModeDir] bit; +// this may be necessary for detailed control over the directory's [fs.FileInfo] +// or to create an empty directory. 
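+//
+// A minimal sketch of building one in a test (names and contents are
+// illustrative):
+//
+//	fsys := fstest.MapFS{
+//		"hello.txt":    {Data: []byte("hello, world\n")},
+//		"sub/data.txt": {Data: []byte("nested"), Mode: 0444},
+//	}
+//	data, err := fs.ReadFile(fsys, "hello.txt")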
+//
+// File system operations read directly from the map,
+// so that the file system can be changed by editing the map as needed.
+// An implication is that file system operations must not run concurrently
+// with changes to the map, which would be a race.
+// Another implication is that opening or reading a directory requires
+// iterating over the entire map, so a MapFS should typically be used with not more
+// than a few hundred entries or directory reads.
+type MapFS map[string]*MapFile
+
+// A MapFile describes a single file in a [MapFS].
+type MapFile struct {
+ Data []byte // file content
+ Mode fs.FileMode // fs.FileInfo.Mode
+ ModTime time.Time // fs.FileInfo.ModTime
+ Sys any // fs.FileInfo.Sys
+}
+
+var _ fs.FS = MapFS(nil)
+var _ fs.File = (*openMapFile)(nil)
+
+// Open opens the named file.
+func (fsys MapFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ file := fsys[name]
+ if file != nil && file.Mode&fs.ModeDir == 0 {
+ // Ordinary file
+ return &openMapFile{name, mapFileInfo{path.Base(name), file}, 0}, nil
+ }
+
+ // Directory, possibly synthesized.
+ // Note that file can be nil here: the map need not contain explicit parent directories for all its files.
+ // But file can also be non-nil, in case the user wants to set metadata for the directory explicitly.
+ // Either way, we need to construct the list of children of this directory.
+ var list []mapFileInfo
+ var elem string
+ var need = make(map[string]bool)
+ if name == "." {
+ elem = "."
+ for fname, f := range fsys {
+ i := strings.Index(fname, "/")
+ if i < 0 {
+ if fname != "." {
+ list = append(list, mapFileInfo{fname, f})
+ }
+ } else {
+ need[fname[:i]] = true
+ }
+ }
+ } else {
+ elem = name[strings.LastIndex(name, "/")+1:]
+ prefix := name + "/"
+ for fname, f := range fsys {
+ if strings.HasPrefix(fname, prefix) {
+ felem := fname[len(prefix):]
+ i := strings.Index(felem, "/")
+ if i < 0 {
+ list = append(list, mapFileInfo{felem, f})
+ } else {
+ need[fname[len(prefix):len(prefix)+i]] = true
+ }
+ }
+ }
+ // If the directory name is not in the map,
+ // and there are no children of the name in the map,
+ // then the directory is treated as not existing.
+ if file == nil && list == nil && len(need) == 0 {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
+ }
+ }
+ for _, fi := range list {
+ delete(need, fi.name)
+ }
+ for name := range need {
+ list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir | 0555}})
+ }
+ slices.SortFunc(list, func(a, b mapFileInfo) int {
+ return strings.Compare(a.name, b.name)
+ })
+
+ if file == nil {
+ file = &MapFile{Mode: fs.ModeDir | 0555}
+ }
+ return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
+}
+
+// fsOnly is a wrapper that hides all but the fs.FS methods,
+// to avoid an infinite recursion when implementing special
+// methods in terms of helpers that would use them.
+// (In general, implementing these methods using the package fs helpers
+// is redundant and unnecessary, but having the methods may make
+// MapFS exercise more code paths when used in tests.)
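+//
+// Concretely, fs.ReadFile first checks whether its argument implements
+// fs.ReadFileFS; wrapping the receiver in fsOnly hides MapFS.ReadFile from
+// that check, so the helper falls back to Open+Read instead of calling back
+// into the very method being implemented.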
+type fsOnly struct{ fs.FS } + +func (fsys MapFS) ReadFile(name string) ([]byte, error) { + return fs.ReadFile(fsOnly{fsys}, name) +} + +func (fsys MapFS) Stat(name string) (fs.FileInfo, error) { + return fs.Stat(fsOnly{fsys}, name) +} + +func (fsys MapFS) ReadDir(name string) ([]fs.DirEntry, error) { + return fs.ReadDir(fsOnly{fsys}, name) +} + +func (fsys MapFS) Glob(pattern string) ([]string, error) { + return fs.Glob(fsOnly{fsys}, pattern) +} + +type noSub struct { + MapFS +} + +func (noSub) Sub() {} // not the fs.SubFS signature + +func (fsys MapFS) Sub(dir string) (fs.FS, error) { + return fs.Sub(noSub{fsys}, dir) +} + +// A mapFileInfo implements fs.FileInfo and fs.DirEntry for a given map file. +type mapFileInfo struct { + name string + f *MapFile +} + +func (i *mapFileInfo) Name() string { return path.Base(i.name) } +func (i *mapFileInfo) Size() int64 { return int64(len(i.f.Data)) } +func (i *mapFileInfo) Mode() fs.FileMode { return i.f.Mode } +func (i *mapFileInfo) Type() fs.FileMode { return i.f.Mode.Type() } +func (i *mapFileInfo) ModTime() time.Time { return i.f.ModTime } +func (i *mapFileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 } +func (i *mapFileInfo) Sys() any { return i.f.Sys } +func (i *mapFileInfo) Info() (fs.FileInfo, error) { return i, nil } + +func (i *mapFileInfo) String() string { + return fs.FormatFileInfo(i) +} + +// An openMapFile is a regular (non-directory) fs.File open for reading. +type openMapFile struct { + path string + mapFileInfo + offset int64 +} + +func (f *openMapFile) Stat() (fs.FileInfo, error) { return &f.mapFileInfo, nil } + +func (f *openMapFile) Close() error { return nil } + +func (f *openMapFile) Read(b []byte) (int, error) { + if f.offset >= int64(len(f.f.Data)) { + return 0, io.EOF + } + if f.offset < 0 { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.f.Data[f.offset:]) + f.offset += int64(n) + return n, nil +} + +func (f *openMapFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + // offset += 0 + case 1: + offset += f.offset + case 2: + offset += int64(len(f.f.Data)) + } + if offset < 0 || offset > int64(len(f.f.Data)) { + return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid} + } + f.offset = offset + return offset, nil +} + +func (f *openMapFile) ReadAt(b []byte, offset int64) (int, error) { + if offset < 0 || offset > int64(len(f.f.Data)) { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.f.Data[offset:]) + if n < len(b) { + return n, io.EOF + } + return n, nil +} + +// A mapDir is a directory fs.File (so also an fs.ReadDirFile) open for reading. 
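+// Its offset field lets successive ReadDir(n) calls page through the
+// precomputed entry list, so a directory can be read all at once or in
+// batches.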
+type mapDir struct { + path string + mapFileInfo + entry []mapFileInfo + offset int +} + +func (d *mapDir) Stat() (fs.FileInfo, error) { return &d.mapFileInfo, nil } +func (d *mapDir) Close() error { return nil } +func (d *mapDir) Read(b []byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid} +} + +func (d *mapDir) ReadDir(count int) ([]fs.DirEntry, error) { + n := len(d.entry) - d.offset + if n == 0 && count > 0 { + return nil, io.EOF + } + if count > 0 && n > count { + n = count + } + list := make([]fs.DirEntry, n) + for i := range list { + list[i] = &d.entry[d.offset+i] + } + d.offset += n + return list, nil +} diff --git a/testing/testing/fstest/mapfs_test.go b/testing/testing/fstest/mapfs_test.go new file mode 100644 index 0000000..6381a2e --- /dev/null +++ b/testing/testing/fstest/mapfs_test.go @@ -0,0 +1,59 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fstest + +import ( + "fmt" + "io/fs" + "strings" + "testing" +) + +func TestMapFS(t *testing.T) { + m := MapFS{ + "hello": {Data: []byte("hello, world\n")}, + "fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")}, + } + if err := TestFS(m, "hello", "fortune", "fortune/k", "fortune/k/ken.txt"); err != nil { + t.Fatal(err) + } +} + +func TestMapFSChmodDot(t *testing.T) { + m := MapFS{ + "a/b.txt": &MapFile{Mode: 0666}, + ".": &MapFile{Mode: 0777 | fs.ModeDir}, + } + buf := new(strings.Builder) + fs.WalkDir(m, ".", func(path string, d fs.DirEntry, err error) error { + fi, err := d.Info() + if err != nil { + return err + } + fmt.Fprintf(buf, "%s: %v\n", path, fi.Mode()) + return nil + }) + want := ` +.: drwxrwxrwx +a: dr-xr-xr-x +a/b.txt: -rw-rw-rw- +`[1:] + got := buf.String() + if want != got { + t.Errorf("MapFS modes want:\n%s\ngot:\n%s\n", want, got) + } +} + +func TestMapFSFileInfoName(t *testing.T) { + m := MapFS{ + "path/to/b.txt": &MapFile{}, + } + info, _ := m.Stat("path/to/b.txt") + want := "b.txt" + got := info.Name() + if want != got { + t.Errorf("MapFS FileInfo.Name want:\n%s\ngot:\n%s\n", want, got) + } +} diff --git a/testing/testing/fstest/testfs.go b/testing/testing/fstest/testfs.go new file mode 100644 index 0000000..affdfa6 --- /dev/null +++ b/testing/testing/fstest/testfs.go @@ -0,0 +1,615 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fstest implements support for testing implementations and users of file systems. +package fstest + +import ( + "errors" + "fmt" + "io" + "io/fs" + "maps" + "path" + "slices" + "strings" + "testing/iotest" +) + +// TestFS tests a file system implementation. +// It walks the entire tree of files in fsys, +// opening and checking that each file behaves correctly. +// It also checks that the file system contains at least the expected files. +// As a special case, if no expected files are listed, fsys must be empty. +// Otherwise, fsys must contain at least the listed files; it can also contain others. +// The contents of fsys must not change concurrently with TestFS. +// +// If TestFS finds any misbehaviors, it returns either the first error or a +// list of errors. Use [errors.Is] or [errors.As] to inspect. 
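+//
+// A sketch of unwrapping the combined error into individual failures,
+// relying only on the errors.Join semantics used internally:
+//
+//	var errs interface{ Unwrap() []error }
+//	if errors.As(err, &errs) {
+//		for _, e := range errs.Unwrap() {
+//			fmt.Println(e)
+//		}
+//	}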
+// +// Typical usage inside a test is: +// +// if err := fstest.TestFS(myFS, "file/that/should/be/present"); err != nil { +// t.Fatal(err) +// } +func TestFS(fsys fs.FS, expected ...string) error { + if err := testFS(fsys, expected...); err != nil { + return err + } + for _, name := range expected { + if i := strings.Index(name, "/"); i >= 0 { + dir, dirSlash := name[:i], name[:i+1] + var subExpected []string + for _, name := range expected { + if strings.HasPrefix(name, dirSlash) { + subExpected = append(subExpected, name[len(dirSlash):]) + } + } + sub, err := fs.Sub(fsys, dir) + if err != nil { + return err + } + if err := testFS(sub, subExpected...); err != nil { + return fmt.Errorf("testing fs.Sub(fsys, %s): %w", dir, err) + } + break // one sub-test is enough + } + } + return nil +} + +func testFS(fsys fs.FS, expected ...string) error { + t := fsTester{fsys: fsys} + t.checkDir(".") + t.checkOpen(".") + found := make(map[string]bool) + for _, dir := range t.dirs { + found[dir] = true + } + for _, file := range t.files { + found[file] = true + } + delete(found, ".") + if len(expected) == 0 && len(found) > 0 { + list := slices.Sorted(maps.Keys(found)) + if len(list) > 15 { + list = append(list[:10], "...") + } + t.errorf("expected empty file system but found files:\n%s", strings.Join(list, "\n")) + } + for _, name := range expected { + if !found[name] { + t.errorf("expected but not found: %s", name) + } + } + if len(t.errors) == 0 { + return nil + } + return fmt.Errorf("TestFS found errors:\n%w", errors.Join(t.errors...)) +} + +// An fsTester holds state for running the test. +type fsTester struct { + fsys fs.FS + errors []error + dirs []string + files []string +} + +// errorf adds an error to the list of errors. +func (t *fsTester) errorf(format string, args ...any) { + t.errors = append(t.errors, fmt.Errorf(format, args...)) +} + +func (t *fsTester) openDir(dir string) fs.ReadDirFile { + f, err := t.fsys.Open(dir) + if err != nil { + t.errorf("%s: Open: %w", dir, err) + return nil + } + d, ok := f.(fs.ReadDirFile) + if !ok { + f.Close() + t.errorf("%s: Open returned File type %T, not a fs.ReadDirFile", dir, f) + return nil + } + return d +} + +// checkDir checks the directory dir, which is expected to exist +// (it is either the root or was found in a directory listing with IsDir true). +func (t *fsTester) checkDir(dir string) { + // Read entire directory. + t.dirs = append(t.dirs, dir) + d := t.openDir(dir) + if d == nil { + return + } + list, err := d.ReadDir(-1) + if err != nil { + d.Close() + t.errorf("%s: ReadDir(-1): %w", dir, err) + return + } + + // Check all children. + var prefix string + if dir == "." { + prefix = "" + } else { + prefix = dir + "/" + } + for _, info := range list { + name := info.Name() + switch { + case name == ".", name == "..", name == "": + t.errorf("%s: ReadDir: child has invalid name: %#q", dir, name) + continue + case strings.Contains(name, "/"): + t.errorf("%s: ReadDir: child name contains slash: %#q", dir, name) + continue + case strings.Contains(name, `\`): + t.errorf("%s: ReadDir: child name contains backslash: %#q", dir, name) + continue + } + path := prefix + name + t.checkStat(path, info) + t.checkOpen(path) + if info.IsDir() { + t.checkDir(path) + } else { + t.checkFile(path) + } + } + + // Check ReadDir(-1) at EOF. 
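+ // Per the fs.ReadDirFile contract, ReadDir(n) with n <= 0 returns all
+ // remaining entries and a nil error, so at EOF it must yield an empty
+ // slice and nil rather than io.EOF.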
+ list2, err := d.ReadDir(-1) + if len(list2) > 0 || err != nil { + d.Close() + t.errorf("%s: ReadDir(-1) at EOF = %d entries, %w, wanted 0 entries, nil", dir, len(list2), err) + return + } + + // Check ReadDir(1) at EOF (different results). + list2, err = d.ReadDir(1) + if len(list2) > 0 || err != io.EOF { + d.Close() + t.errorf("%s: ReadDir(1) at EOF = %d entries, %w, wanted 0 entries, EOF", dir, len(list2), err) + return + } + + // Check that close does not report an error. + if err := d.Close(); err != nil { + t.errorf("%s: Close: %w", dir, err) + } + + // Check that closing twice doesn't crash. + // The return value doesn't matter. + d.Close() + + // Reopen directory, read a second time, make sure contents match. + if d = t.openDir(dir); d == nil { + return + } + defer d.Close() + list2, err = d.ReadDir(-1) + if err != nil { + t.errorf("%s: second Open+ReadDir(-1): %w", dir, err) + return + } + t.checkDirList(dir, "first Open+ReadDir(-1) vs second Open+ReadDir(-1)", list, list2) + + // Reopen directory, read a third time in pieces, make sure contents match. + if d = t.openDir(dir); d == nil { + return + } + defer d.Close() + list2 = nil + for { + n := 1 + if len(list2) > 0 { + n = 2 + } + frag, err := d.ReadDir(n) + if len(frag) > n { + t.errorf("%s: third Open: ReadDir(%d) after %d: %d entries (too many)", dir, n, len(list2), len(frag)) + return + } + list2 = append(list2, frag...) + if err == io.EOF { + break + } + if err != nil { + t.errorf("%s: third Open: ReadDir(%d) after %d: %w", dir, n, len(list2), err) + return + } + if n == 0 { + t.errorf("%s: third Open: ReadDir(%d) after %d: 0 entries but nil error", dir, n, len(list2)) + return + } + } + t.checkDirList(dir, "first Open+ReadDir(-1) vs third Open+ReadDir(1,2) loop", list, list2) + + // If fsys has ReadDir, check that it matches and is sorted. + if fsys, ok := t.fsys.(fs.ReadDirFS); ok { + list2, err := fsys.ReadDir(dir) + if err != nil { + t.errorf("%s: fsys.ReadDir: %w", dir, err) + return + } + t.checkDirList(dir, "first Open+ReadDir(-1) vs fsys.ReadDir", list, list2) + + for i := 0; i+1 < len(list2); i++ { + if list2[i].Name() >= list2[i+1].Name() { + t.errorf("%s: fsys.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name()) + } + } + } + + // Check fs.ReadDir as well. + list2, err = fs.ReadDir(t.fsys, dir) + if err != nil { + t.errorf("%s: fs.ReadDir: %w", dir, err) + return + } + t.checkDirList(dir, "first Open+ReadDir(-1) vs fs.ReadDir", list, list2) + + for i := 0; i+1 < len(list2); i++ { + if list2[i].Name() >= list2[i+1].Name() { + t.errorf("%s: fs.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name()) + } + } + + t.checkGlob(dir, list2) +} + +// formatEntry formats an fs.DirEntry into a string for error messages and comparison. +func formatEntry(entry fs.DirEntry) string { + return fmt.Sprintf("%s IsDir=%v Type=%v", entry.Name(), entry.IsDir(), entry.Type()) +} + +// formatInfoEntry formats an fs.FileInfo into a string like the result of formatEntry, for error messages and comparison. +func formatInfoEntry(info fs.FileInfo) string { + return fmt.Sprintf("%s IsDir=%v Type=%v", info.Name(), info.IsDir(), info.Mode().Type()) +} + +// formatInfo formats an fs.FileInfo into a string for error messages and comparison. 
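+// Unlike formatInfoEntry, it includes Mode, Size, and ModTime, making it the
+// stricter comparison used to match entry.Info() against a direct Stat.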
+func formatInfo(info fs.FileInfo) string { + return fmt.Sprintf("%s IsDir=%v Mode=%v Size=%d ModTime=%v", info.Name(), info.IsDir(), info.Mode(), info.Size(), info.ModTime()) +} + +// checkGlob checks that various glob patterns work if the file system implements GlobFS. +func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) { + if _, ok := t.fsys.(fs.GlobFS); !ok { + return + } + + // Make a complex glob pattern prefix that only matches dir. + var glob string + if dir != "." { + elem := strings.Split(dir, "/") + for i, e := range elem { + var pattern []rune + for j, r := range e { + if r == '*' || r == '?' || r == '\\' || r == '[' || r == '-' { + pattern = append(pattern, '\\', r) + continue + } + switch (i + j) % 5 { + case 0: + pattern = append(pattern, r) + case 1: + pattern = append(pattern, '[', r, ']') + case 2: + pattern = append(pattern, '[', r, '-', r, ']') + case 3: + pattern = append(pattern, '[', '\\', r, ']') + case 4: + pattern = append(pattern, '[', '\\', r, '-', '\\', r, ']') + } + } + elem[i] = string(pattern) + } + glob = strings.Join(elem, "/") + "/" + } + + // Test that malformed patterns are detected. + // The error is likely path.ErrBadPattern but need not be. + if _, err := t.fsys.(fs.GlobFS).Glob(glob + "nonexist/[]"); err == nil { + t.errorf("%s: Glob(%#q): bad pattern not detected", dir, glob+"nonexist/[]") + } + + // Try to find a letter that appears in only some of the final names. + c := rune('a') + for ; c <= 'z'; c++ { + have, haveNot := false, false + for _, d := range list { + if strings.ContainsRune(d.Name(), c) { + have = true + } else { + haveNot = true + } + } + if have && haveNot { + break + } + } + if c > 'z' { + c = 'a' + } + glob += "*" + string(c) + "*" + + var want []string + for _, d := range list { + if strings.ContainsRune(d.Name(), c) { + want = append(want, path.Join(dir, d.Name())) + } + } + + names, err := t.fsys.(fs.GlobFS).Glob(glob) + if err != nil { + t.errorf("%s: Glob(%#q): %w", dir, glob, err) + return + } + if slices.Equal(want, names) { + return + } + + if !slices.IsSorted(names) { + t.errorf("%s: Glob(%#q): unsorted output:\n%s", dir, glob, strings.Join(names, "\n")) + slices.Sort(names) + } + + var problems []string + for len(want) > 0 || len(names) > 0 { + switch { + case len(want) > 0 && len(names) > 0 && want[0] == names[0]: + want, names = want[1:], names[1:] + case len(want) > 0 && (len(names) == 0 || want[0] < names[0]): + problems = append(problems, "missing: "+want[0]) + want = want[1:] + default: + problems = append(problems, "extra: "+names[0]) + names = names[1:] + } + } + t.errorf("%s: Glob(%#q): wrong output:\n%s", dir, glob, strings.Join(problems, "\n")) +} + +// checkStat checks that a direct stat of path matches entry, +// which was found in the parent's directory listing. +func (t *fsTester) checkStat(path string, entry fs.DirEntry) { + file, err := t.fsys.Open(path) + if err != nil { + t.errorf("%s: Open: %w", path, err) + return + } + info, err := file.Stat() + file.Close() + if err != nil { + t.errorf("%s: Stat: %w", path, err) + return + } + fentry := formatEntry(entry) + fientry := formatInfoEntry(info) + // Note: mismatch here is OK for symlink, because Open dereferences symlink. 
+ if fentry != fientry && entry.Type()&fs.ModeSymlink == 0 { + t.errorf("%s: mismatch:\n\tentry = %s\n\tfile.Stat() = %s", path, fentry, fientry) + } + + einfo, err := entry.Info() + if err != nil { + t.errorf("%s: entry.Info: %w", path, err) + return + } + finfo := formatInfo(info) + if entry.Type()&fs.ModeSymlink != 0 { + // For symlink, just check that entry.Info matches entry on common fields. + // Open deferences symlink, so info itself may differ. + feentry := formatInfoEntry(einfo) + if fentry != feentry { + t.errorf("%s: mismatch\n\tentry = %s\n\tentry.Info() = %s\n", path, fentry, feentry) + } + } else { + feinfo := formatInfo(einfo) + if feinfo != finfo { + t.errorf("%s: mismatch:\n\tentry.Info() = %s\n\tfile.Stat() = %s\n", path, feinfo, finfo) + } + } + + // Stat should be the same as Open+Stat, even for symlinks. + info2, err := fs.Stat(t.fsys, path) + if err != nil { + t.errorf("%s: fs.Stat: %w", path, err) + return + } + finfo2 := formatInfo(info2) + if finfo2 != finfo { + t.errorf("%s: fs.Stat(...) = %s\n\twant %s", path, finfo2, finfo) + } + + if fsys, ok := t.fsys.(fs.StatFS); ok { + info2, err := fsys.Stat(path) + if err != nil { + t.errorf("%s: fsys.Stat: %w", path, err) + return + } + finfo2 := formatInfo(info2) + if finfo2 != finfo { + t.errorf("%s: fsys.Stat(...) = %s\n\twant %s", path, finfo2, finfo) + } + } +} + +// checkDirList checks that two directory lists contain the same files and file info. +// The order of the lists need not match. +func (t *fsTester) checkDirList(dir, desc string, list1, list2 []fs.DirEntry) { + old := make(map[string]fs.DirEntry) + checkMode := func(entry fs.DirEntry) { + if entry.IsDir() != (entry.Type()&fs.ModeDir != 0) { + if entry.IsDir() { + t.errorf("%s: ReadDir returned %s with IsDir() = true, Type() & ModeDir = 0", dir, entry.Name()) + } else { + t.errorf("%s: ReadDir returned %s with IsDir() = false, Type() & ModeDir = ModeDir", dir, entry.Name()) + } + } + } + + for _, entry1 := range list1 { + old[entry1.Name()] = entry1 + checkMode(entry1) + } + + var diffs []string + for _, entry2 := range list2 { + entry1 := old[entry2.Name()] + if entry1 == nil { + checkMode(entry2) + diffs = append(diffs, "+ "+formatEntry(entry2)) + continue + } + if formatEntry(entry1) != formatEntry(entry2) { + diffs = append(diffs, "- "+formatEntry(entry1), "+ "+formatEntry(entry2)) + } + delete(old, entry2.Name()) + } + for _, entry1 := range old { + diffs = append(diffs, "- "+formatEntry(entry1)) + } + + if len(diffs) == 0 { + return + } + + slices.SortFunc(diffs, func(a, b string) int { + fa := strings.Fields(a) + fb := strings.Fields(b) + // sort by name (i < j) and then +/- (j < i, because + < -) + return strings.Compare(fa[1]+" "+fb[0], fb[1]+" "+fa[0]) + }) + + t.errorf("%s: diff %s:\n\t%s", dir, desc, strings.Join(diffs, "\n\t")) +} + +// checkFile checks that basic file reading works correctly. +func (t *fsTester) checkFile(file string) { + t.files = append(t.files, file) + + // Read entire file. + f, err := t.fsys.Open(file) + if err != nil { + t.errorf("%s: Open: %w", file, err) + return + } + + data, err := io.ReadAll(f) + if err != nil { + f.Close() + t.errorf("%s: Open+ReadAll: %w", file, err) + return + } + + if err := f.Close(); err != nil { + t.errorf("%s: Close: %w", file, err) + } + + // Check that closing twice doesn't crash. + // The return value doesn't matter. + f.Close() + + // Check that ReadFile works if present. 
+ if fsys, ok := t.fsys.(fs.ReadFileFS); ok { + data2, err := fsys.ReadFile(file) + if err != nil { + t.errorf("%s: fsys.ReadFile: %w", file, err) + return + } + t.checkFileRead(file, "ReadAll vs fsys.ReadFile", data, data2) + + // Modify the data and check it again. Modifying the + // returned byte slice should not affect the next call. + for i := range data2 { + data2[i]++ + } + data2, err = fsys.ReadFile(file) + if err != nil { + t.errorf("%s: second call to fsys.ReadFile: %w", file, err) + return + } + t.checkFileRead(file, "Readall vs second fsys.ReadFile", data, data2) + + t.checkBadPath(file, "ReadFile", + func(name string) error { _, err := fsys.ReadFile(name); return err }) + } + + // Check that fs.ReadFile works with t.fsys. + data2, err := fs.ReadFile(t.fsys, file) + if err != nil { + t.errorf("%s: fs.ReadFile: %w", file, err) + return + } + t.checkFileRead(file, "ReadAll vs fs.ReadFile", data, data2) + + // Use iotest.TestReader to check small reads, Seek, ReadAt. + f, err = t.fsys.Open(file) + if err != nil { + t.errorf("%s: second Open: %w", file, err) + return + } + defer f.Close() + if err := iotest.TestReader(f, data); err != nil { + t.errorf("%s: failed TestReader:\n\t%s", file, strings.ReplaceAll(err.Error(), "\n", "\n\t")) + } +} + +func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) { + if string(data1) != string(data2) { + t.errorf("%s: %s: different data returned\n\t%q\n\t%q", file, desc, data1, data2) + return + } +} + +// checkOpen validates file opening behavior by attempting to open and then close the given file path. +func (t *fsTester) checkOpen(file string) { + t.checkBadPath(file, "Open", func(file string) error { + f, err := t.fsys.Open(file) + if err == nil { + f.Close() + } + return err + }) +} + +// checkBadPath checks that various invalid forms of file's name cannot be opened using open. +func (t *fsTester) checkBadPath(file string, desc string, open func(string) error) { + bad := []string{ + "/" + file, + file + "/.", + } + if file == "." { + bad = append(bad, "/") + } + if i := strings.Index(file, "/"); i >= 0 { + bad = append(bad, + file[:i]+"//"+file[i+1:], + file[:i]+"/./"+file[i+1:], + file[:i]+`\`+file[i+1:], + file[:i]+"/../"+file, + ) + } + if i := strings.LastIndex(file, "/"); i >= 0 { + bad = append(bad, + file[:i]+"//"+file[i+1:], + file[:i]+"/./"+file[i+1:], + file[:i]+`\`+file[i+1:], + file+"/../"+file[i+1:], + ) + } + + for _, b := range bad { + if err := open(b); err == nil { + t.errorf("%s: %s(%s) succeeded, want error", file, desc, b) + } + } +} diff --git a/testing/testing/fstest/testfs_test.go b/testing/testing/fstest/testfs_test.go new file mode 100644 index 0000000..69853a0 --- /dev/null +++ b/testing/testing/fstest/testfs_test.go @@ -0,0 +1,118 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package fstest
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "testing"
+
+ "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv"
+)
+
+func TestSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ tmp := t.TempDir()
+ tmpfs := os.DirFS(tmp)
+
+ if err := os.WriteFile(filepath.Join(tmp, "hello"), []byte("hello, world\n"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.Symlink(filepath.Join(tmp, "hello"), filepath.Join(tmp, "hello.link")); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := TestFS(tmpfs, "hello", "hello.link"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestDash(t *testing.T) {
+ m := MapFS{
+ "a-b/a": {Data: []byte("a-b/a")},
+ }
+ if err := TestFS(m, "a-b/a"); err != nil {
+ t.Error(err)
+ }
+}
+
+type shuffledFS MapFS
+
+func (fsys shuffledFS) Open(name string) (fs.File, error) {
+ f, err := MapFS(fsys).Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &shuffledFile{File: f}, nil
+}
+
+type shuffledFile struct{ fs.File }
+
+func (f *shuffledFile) ReadDir(n int) ([]fs.DirEntry, error) {
+ dirents, err := f.File.(fs.ReadDirFile).ReadDir(n)
+ // Shuffle in a deterministic way; all we care about is making sure that the
+ // list of directory entries is not in lexicographic order.
+ //
+ // We do this to make sure that the TestFS test suite is not affected by the
+ // order of directory entries.
+ slices.SortFunc(dirents, func(a, b fs.DirEntry) int {
+ return strings.Compare(b.Name(), a.Name())
+ })
+ return dirents, err
+}
+
+func TestShuffledFS(t *testing.T) {
+ fsys := shuffledFS{
+ "tmp/one": {Data: []byte("1")},
+ "tmp/two": {Data: []byte("2")},
+ "tmp/three": {Data: []byte("3")},
+ }
+ if err := TestFS(fsys, "tmp/one", "tmp/two", "tmp/three"); err != nil {
+ t.Error(err)
+ }
+}
+
+// failPermFS is a filesystem that always fails with fs.ErrPermission.
+type failPermFS struct{}
+
+func (f failPermFS) Open(name string) (fs.File, error) {
+ if !fs.ValidPath(name) {
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
+ }
+ return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrPermission}
+}
+
+func TestTestFSWrappedErrors(t *testing.T) {
+ err := TestFS(failPermFS{})
+ if err == nil {
+ t.Fatal("error expected")
+ }
+ t.Logf("Error (expecting wrapped fs.ErrPermission):\n%v", err)
+
+ if !errors.Is(err, fs.ErrPermission) {
+ t.Errorf("error should be a wrapped ErrPermission: %#v", err)
+ }
+
+ // TestFS is expected to return a list of errors.
+ // Enforce that the list can be extracted for browsing.
+ var errs interface{ Unwrap() []error }
+ if !errors.As(err, &errs) {
+ t.Errorf("caller should be able to extract the errors as a list: %#v", err)
+ } else {
+ for _, err := range errs.Unwrap() {
+ // ErrPermission is expected
+ // but any other error must be reported.
+ if !errors.Is(err, fs.ErrPermission) {
+ t.Errorf("unexpected error: %v", err)
+ }
+ }
+ }
+}
diff --git a/testing/testing/fuzz.go b/testing/testing/fuzz.go
new file mode 100644
index 0000000..dceb786
--- /dev/null
+++ b/testing/testing/fuzz.go
@@ -0,0 +1,742 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package testing + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "time" +) + +func initFuzzFlags() { + matchFuzz = flag.String("test.fuzz", "", "run the fuzz test matching `regexp`") + flag.Var(&fuzzDuration, "test.fuzztime", "time to spend fuzzing; default is to run indefinitely") + flag.Var(&minimizeDuration, "test.fuzzminimizetime", "time to spend minimizing a value after finding a failing input") + + fuzzCacheDir = flag.String("test.fuzzcachedir", "", "directory where interesting fuzzing inputs are stored (for use only by cmd/go)") + isFuzzWorker = flag.Bool("test.fuzzworker", false, "coordinate with the parent process to fuzz random values (for use only by cmd/go)") +} + +var ( + matchFuzz *string + fuzzDuration durationOrCountFlag + minimizeDuration = durationOrCountFlag{d: 60 * time.Second, allowZero: true} + fuzzCacheDir *string + isFuzzWorker *bool + + // corpusDir is the parent directory of the fuzz test's seed corpus within + // the package. + corpusDir = "testdata/fuzz" +) + +// fuzzWorkerExitCode is used as an exit code by fuzz worker processes after an +// internal error. This distinguishes internal errors from uncontrolled panics +// and other failures. Keep in sync with internal/fuzz.workerExitCode. +const fuzzWorkerExitCode = 70 + +// InternalFuzzTarget is an internal type but exported because it is +// cross-package; it is part of the implementation of the "go test" command. +type InternalFuzzTarget struct { + Name string + Fn func(f *F) +} + +// F is a type passed to fuzz tests. +// +// Fuzz tests run generated inputs against a provided fuzz target, which can +// find and report potential bugs in the code being tested. +// +// A fuzz test runs the seed corpus by default, which includes entries provided +// by (*F).Add and entries in the testdata/fuzz/ directory. After +// any necessary setup and calls to (*F).Add, the fuzz test must then call +// (*F).Fuzz to provide the fuzz target. See the testing package documentation +// for an example, and see the [F.Fuzz] and [F.Add] method documentation for +// details. +// +// *F methods can only be called before (*F).Fuzz. Once the test is +// executing the fuzz target, only (*T) methods can be used. The only *F methods +// that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name. +type F struct { + common + fstate *fuzzState + tstate *testState + + // inFuzzFn is true when the fuzz function is running. Most F methods cannot + // be called when inFuzzFn is true. + inFuzzFn bool + + // corpus is a set of seed corpus entries, added with F.Add and loaded + // from testdata. + corpus []corpusEntry + + result fuzzResult + fuzzCalled bool +} + +var _ TB = (*F)(nil) + +// corpusEntry is an alias to the same type as internal/fuzz.CorpusEntry. +// We use a type alias because we don't want to export this type, and we can't +// import internal/fuzz from testing. +type corpusEntry = struct { + Parent string + Path string + Data []byte + Values []any + Generation int + IsSeed bool +} + +// Helper marks the calling function as a test helper function. +// When printing file and line information, that function will be skipped. +// Helper may be called simultaneously from multiple goroutines. +func (f *F) Helper() { + if f.inFuzzFn { + panic("testing: f.Helper was called inside the fuzz target, use t.Helper instead") + } + + // common.Helper is inlined here. + // If we called it, it would mark F.Helper as the helper + // instead of the caller. 
+ f.mu.Lock() + defer f.mu.Unlock() + if f.helperPCs == nil { + f.helperPCs = make(map[uintptr]struct{}) + } + // repeating code from callerName here to save walking a stack frame + var pc [1]uintptr + n := runtime.Callers(2, pc[:]) // skip runtime.Callers + Helper + if n == 0 { + panic("testing: zero callers found") + } + if _, found := f.helperPCs[pc[0]]; !found { + f.helperPCs[pc[0]] = struct{}{} + f.helperNames = nil // map will be recreated next time it is needed + } +} + +// Fail marks the function as having failed but continues execution. +func (f *F) Fail() { + // (*F).Fail may be called by (*T).Fail, which we should allow. However, we + // shouldn't allow direct (*F).Fail calls from inside the (*F).Fuzz function. + if f.inFuzzFn { + panic("testing: f.Fail was called inside the fuzz target, use t.Fail instead") + } + f.common.Helper() + f.common.Fail() +} + +// Skipped reports whether the test was skipped. +func (f *F) Skipped() bool { + // (*F).Skipped may be called by tRunner, which we should allow. However, we + // shouldn't allow direct (*F).Skipped calls from inside the (*F).Fuzz function. + if f.inFuzzFn { + panic("testing: f.Skipped was called inside the fuzz target, use t.Skipped instead") + } + f.common.Helper() + return f.common.Skipped() +} + +// Add will add the arguments to the seed corpus for the fuzz test. This will be +// a no-op if called after or within the fuzz target, and args must match the +// arguments for the fuzz target. +func (f *F) Add(args ...any) { + var values []any + for i := range args { + if t := reflect.TypeOf(args[i]); !supportedTypes[t] { + panic(fmt.Sprintf("testing: unsupported type to Add %v", t)) + } + values = append(values, args[i]) + } + f.corpus = append(f.corpus, corpusEntry{Values: values, IsSeed: true, Path: fmt.Sprintf("seed#%d", len(f.corpus))}) +} + +// supportedTypes represents all of the supported types which can be fuzzed. +var supportedTypes = map[reflect.Type]bool{ + reflect.TypeOf(([]byte)("")): true, + reflect.TypeOf((string)("")): true, + reflect.TypeOf((bool)(false)): true, + reflect.TypeOf((byte)(0)): true, + reflect.TypeOf((rune)(0)): true, + reflect.TypeOf((float32)(0)): true, + reflect.TypeOf((float64)(0)): true, + reflect.TypeOf((int)(0)): true, + reflect.TypeOf((int8)(0)): true, + reflect.TypeOf((int16)(0)): true, + reflect.TypeOf((int32)(0)): true, + reflect.TypeOf((int64)(0)): true, + reflect.TypeOf((uint)(0)): true, + reflect.TypeOf((uint8)(0)): true, + reflect.TypeOf((uint16)(0)): true, + reflect.TypeOf((uint32)(0)): true, + reflect.TypeOf((uint64)(0)): true, +} + +// Fuzz runs the fuzz function, ff, for fuzz testing. If ff fails for a set of +// arguments, those arguments will be added to the seed corpus. +// +// ff must be a function with no return value whose first argument is *T and +// whose remaining arguments are the types to be fuzzed. +// For example: +// +// f.Fuzz(func(t *testing.T, b []byte, i int) { ... }) +// +// The following types are allowed: []byte, string, bool, byte, rune, float32, +// float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64. +// More types may be supported in the future. +// +// ff must not call any *F methods, e.g. (*F).Log, (*F).Error, (*F).Skip. Use +// the corresponding *T method instead. The only *F methods that are allowed in +// the (*F).Fuzz function are (*F).Failed and (*F).Name. +// +// This function should be fast and deterministic, and its behavior should not +// depend on shared state. 
No mutable input arguments, or pointers to them, +// should be retained between executions of the fuzz function, as the memory +// backing them may be mutated during a subsequent invocation. ff must not +// modify the underlying data of the arguments provided by the fuzzing engine. +// +// When fuzzing, F.Fuzz does not return until a problem is found, time runs out +// (set with -fuzztime), or the test process is interrupted by a signal. F.Fuzz +// should be called exactly once, unless F.Skip or [F.Fail] is called beforehand. +func (f *F) Fuzz(ff any) { + if f.fuzzCalled { + panic("testing: F.Fuzz called more than once") + } + f.fuzzCalled = true + if f.failed { + return + } + f.Helper() + + // ff should be in the form func(*testing.T, ...interface{}) + fn := reflect.ValueOf(ff) + fnType := fn.Type() + if fnType.Kind() != reflect.Func { + panic("testing: F.Fuzz must receive a function") + } + if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeOf((*T)(nil)) { + panic("testing: fuzz target must receive at least two arguments, where the first argument is a *T") + } + if fnType.NumOut() != 0 { + panic("testing: fuzz target must not return a value") + } + + // Save the types of the function to compare against the corpus. + var types []reflect.Type + for i := 1; i < fnType.NumIn(); i++ { + t := fnType.In(i) + if !supportedTypes[t] { + panic(fmt.Sprintf("testing: unsupported type for fuzzing %v", t)) + } + types = append(types, t) + } + + // Load the testdata seed corpus. Check types of entries in the testdata + // corpus and entries declared with F.Add. + // + // Don't load the seed corpus if this is a worker process; we won't use it. + if f.fstate.mode != fuzzWorker { + for _, c := range f.corpus { + if err := f.fstate.deps.CheckCorpus(c.Values, types); err != nil { + // TODO(#48302): Report the source location of the F.Add call. + f.Fatal(err) + } + } + + // Load seed corpus + c, err := f.fstate.deps.ReadCorpus(filepath.Join(corpusDir, f.name), types) + if err != nil { + f.Fatal(err) + } + for i := range c { + c[i].IsSeed = true // these are all seed corpus values + if f.fstate.mode == fuzzCoordinator { + // If this is the coordinator process, zero the values, since we don't need + // to hold onto them. + c[i].Values = nil + } + } + + f.corpus = append(f.corpus, c...) + } + + // run calls fn on a given input, as a subtest with its own T. + // run is analogous to T.Run. The test filtering and cleanup works similarly. + // fn is called in its own goroutine. + run := func(captureOut io.Writer, e corpusEntry) (ok bool) { + if e.Values == nil { + // The corpusEntry must have non-nil Values in order to run the + // test. If Values is nil, it is a bug in our code. + panic(fmt.Sprintf("corpus file %q was not unmarshaled", e.Path)) + } + if shouldFailFast() { + return true + } + testName := f.name + if e.Path != "" { + testName = fmt.Sprintf("%s/%s", testName, filepath.Base(e.Path)) + } + if f.tstate.isFuzzing { + // Don't preserve subtest names while fuzzing. If fn calls T.Run, + // there will be a very large number of subtests with duplicate names, + // which will use a large amount of memory. The subtest names aren't + // useful since there's no way to re-run them deterministically. + f.tstate.match.clearSubNames() + } + + ctx, cancelCtx := context.WithCancel(f.ctx) + + // Record the stack trace at the point of this call so that if the subtest + // function - which runs in a separate stack - is marked as a helper, we can + // continue walking the stack into the parent test. 
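+		// runtime.Callers(2, ...) skips runtime.Callers and this closure
+		// itself, so creator below records the frames of run's caller.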
+ var pc [maxStackLen]uintptr + n := runtime.Callers(2, pc[:]) + t := &T{ + common: common{ + barrier: make(chan bool), + signal: make(chan bool), + name: testName, + parent: &f.common, + level: f.level + 1, + creator: pc[:n], + chatty: f.chatty, + ctx: ctx, + cancelCtx: cancelCtx, + }, + tstate: f.tstate, + } + if captureOut != nil { + // t.parent aliases f.common. + t.parent.w = captureOut + } + t.w = indenter{&t.common} + if t.chatty != nil { + t.chatty.Updatef(t.name, "=== RUN %s\n", t.name) + } + f.common.inFuzzFn, f.inFuzzFn = true, true + go tRunner(t, func(t *T) { + args := []reflect.Value{reflect.ValueOf(t)} + for _, v := range e.Values { + args = append(args, reflect.ValueOf(v)) + } + // Before resetting the current coverage, defer the snapshot so that + // we make sure it is called right before the tRunner function + // exits, regardless of whether it was executed cleanly, panicked, + // or if the fuzzFn called t.Fatal. + if f.tstate.isFuzzing { + defer f.fstate.deps.SnapshotCoverage() + f.fstate.deps.ResetCoverage() + } + fn.Call(args) + }) + <-t.signal + if t.chatty != nil && t.chatty.json { + t.chatty.Updatef(t.parent.name, "=== NAME %s\n", t.parent.name) + } + f.common.inFuzzFn, f.inFuzzFn = false, false + return !t.Failed() + } + + switch f.fstate.mode { + case fuzzCoordinator: + // Fuzzing is enabled, and this is the test process started by 'go test'. + // Act as the coordinator process, and coordinate workers to perform the + // actual fuzzing. + corpusTargetDir := filepath.Join(corpusDir, f.name) + cacheTargetDir := filepath.Join(*fuzzCacheDir, f.name) + err := f.fstate.deps.CoordinateFuzzing( + fuzzDuration.d, + int64(fuzzDuration.n), + minimizeDuration.d, + int64(minimizeDuration.n), + *parallel, + f.corpus, + types, + corpusTargetDir, + cacheTargetDir) + if err != nil { + f.result = fuzzResult{Error: err} + f.Fail() + fmt.Fprintf(f.w, "%v\n", err) + if crashErr, ok := err.(fuzzCrashError); ok { + crashPath := crashErr.CrashPath() + fmt.Fprintf(f.w, "Failing input written to %s\n", crashPath) + testName := filepath.Base(crashPath) + fmt.Fprintf(f.w, "To re-run:\ngo test -run=%s/%s\n", f.name, testName) + } + } + // TODO(jayconrod,katiehockman): Aggregate statistics across workers + // and add to FuzzResult (ie. time taken, num iterations) + + case fuzzWorker: + // Fuzzing is enabled, and this is a worker process. Follow instructions + // from the coordinator. + if err := f.fstate.deps.RunFuzzWorker(func(e corpusEntry) error { + // Don't write to f.w (which points to Stdout) if running from a + // fuzz worker. This would become very verbose, particularly during + // minimization. Return the error instead, and let the caller deal + // with the output. + var buf strings.Builder + if ok := run(&buf, e); !ok { + return errors.New(buf.String()) + } + return nil + }); err != nil { + // Internal errors are marked with f.Fail; user code may call this too, before F.Fuzz. + // The worker will exit with fuzzWorkerExitCode, indicating this is a failure + // (and 'go test' should exit non-zero) but a failing input should not be recorded. + f.Errorf("communicating with fuzzing coordinator: %v", err) + } + + default: + // Fuzzing is not enabled, or will be done later. Only run the seed + // corpus now. 
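+		// This is the path a plain 'go test' run takes: each seed entry,
+		// whether added with F.Add or read from testdata/fuzz, runs once
+		// as a subtest, subject to the -run filter applied via fullName.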
+		for _, e := range f.corpus {
+			name := fmt.Sprintf("%s/%s", f.name, filepath.Base(e.Path))
+			if _, ok, _ := f.tstate.match.fullName(nil, name); ok {
+				run(f.w, e)
+			}
+		}
+	}
+}
+
+func (f *F) report() {
+	if *isFuzzWorker || f.parent == nil {
+		return
+	}
+	dstr := fmtDuration(f.duration)
+	format := "--- %s: %s (%s)\n"
+	if f.Failed() {
+		f.flushToParent(f.name, format, "FAIL", f.name, dstr)
+	} else if f.chatty != nil {
+		if f.Skipped() {
+			f.flushToParent(f.name, format, "SKIP", f.name, dstr)
+		} else {
+			f.flushToParent(f.name, format, "PASS", f.name, dstr)
+		}
+	}
+}
+
+// fuzzResult contains the results of a fuzz run.
+type fuzzResult struct {
+	N     int           // The number of iterations.
+	T     time.Duration // The total time taken.
+	Error error         // Error is the error from the failing input.
+}
+
+func (r fuzzResult) String() string {
+	if r.Error == nil {
+		return ""
+	}
+	return r.Error.Error()
+}
+
+// fuzzCrashError is satisfied by a failing input detected while fuzzing.
+// These errors are written to the seed corpus and can be re-run with 'go test'.
+// Errors within the fuzzing framework (like I/O errors between coordinator
+// and worker processes) don't satisfy this interface.
+type fuzzCrashError interface {
+	error
+	Unwrap() error
+
+	// CrashPath returns the path of the subtest that corresponds to the saved
+	// crash input file in the seed corpus. The test can be re-run with go test
+	// -run=$test/$name, where $test is the fuzz test name, and $name is the
+	// filepath.Base of the string returned here.
+	CrashPath() string
+}
+
+// fuzzState holds fields common to all fuzz tests.
+type fuzzState struct {
+	deps testDeps
+	mode fuzzMode
+}
+
+type fuzzMode uint8
+
+const (
+	seedCorpusOnly fuzzMode = iota
+	fuzzCoordinator
+	fuzzWorker
+)
+
+// runFuzzTests runs the fuzz tests matching the pattern for -run. This will
+// only run the (*F).Fuzz function for each seed corpus without using the
+// fuzzing engine to generate or mutate inputs.
+func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.Time) (ran, ok bool) {
+	ok = true
+	if len(fuzzTests) == 0 || *isFuzzWorker {
+		return ran, ok
+	}
+	m := newMatcher(deps.MatchString, *match, "-test.run", *skip)
+	var mFuzz *matcher
+	if *matchFuzz != "" {
+		mFuzz = newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
+	}
+
+	for _, procs := range cpuList {
+		runtime.GOMAXPROCS(procs)
+		for i := uint(0); i < *count; i++ {
+			if shouldFailFast() {
+				break
+			}
+
+			tstate := newTestState(*parallel, m)
+			tstate.deadline = deadline
+			fstate := &fuzzState{deps: deps, mode: seedCorpusOnly}
+			root := common{w: os.Stdout} // gather output in one place
+			if Verbose() {
+				root.chatty = newChattyPrinter(root.w)
+			}
+			for _, ft := range fuzzTests {
+				if shouldFailFast() {
+					break
+				}
+				testName, matched, _ := tstate.match.fullName(nil, ft.Name)
+				if !matched {
+					continue
+				}
+				if mFuzz != nil {
+					if _, fuzzMatched, _ := mFuzz.fullName(nil, ft.Name); fuzzMatched {
+						// If this will be fuzzed, then don't run the seed corpus
+						// right now. That will happen later.
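+						// runFuzzing will run it with the fuzzing engine
+						// once -test.fuzz takes over.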
+ continue + } + } + ctx, cancelCtx := context.WithCancel(context.Background()) + f := &F{ + common: common{ + signal: make(chan bool), + barrier: make(chan bool), + name: testName, + parent: &root, + level: root.level + 1, + chatty: root.chatty, + ctx: ctx, + cancelCtx: cancelCtx, + }, + tstate: tstate, + fstate: fstate, + } + f.w = indenter{&f.common} + if f.chatty != nil { + f.chatty.Updatef(f.name, "=== RUN %s\n", f.name) + } + go fRunner(f, ft.Fn) + <-f.signal + if f.chatty != nil && f.chatty.json { + f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name) + } + ok = ok && !f.Failed() + ran = ran || f.ran + } + if !ran { + // There were no tests to run on this iteration. + // This won't change, so no reason to keep trying. + break + } + } + } + + return ran, ok +} + +// runFuzzing runs the fuzz test matching the pattern for -fuzz. Only one such +// fuzz test must match. This will run the fuzzing engine to generate and +// mutate new inputs against the fuzz target. +// +// If fuzzing is disabled (-test.fuzz is not set), runFuzzing +// returns immediately. +func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) { + if len(fuzzTests) == 0 || *matchFuzz == "" { + return true + } + m := newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip) + tstate := newTestState(1, m) + tstate.isFuzzing = true + fstate := &fuzzState{ + deps: deps, + } + root := common{w: os.Stdout} + if *isFuzzWorker { + root.w = io.Discard + fstate.mode = fuzzWorker + } else { + fstate.mode = fuzzCoordinator + } + if Verbose() && !*isFuzzWorker { + root.chatty = newChattyPrinter(root.w) + } + var fuzzTest *InternalFuzzTarget + var testName string + var matched []string + for i := range fuzzTests { + name, ok, _ := tstate.match.fullName(nil, fuzzTests[i].Name) + if !ok { + continue + } + matched = append(matched, name) + fuzzTest = &fuzzTests[i] + testName = name + } + if len(matched) == 0 { + fmt.Fprintln(os.Stderr, "testing: warning: no fuzz tests to fuzz") + return true + } + if len(matched) > 1 { + fmt.Fprintf(os.Stderr, "testing: will not fuzz, -fuzz matches more than one fuzz test: %v\n", matched) + return false + } + + ctx, cancelCtx := context.WithCancel(context.Background()) + f := &F{ + common: common{ + signal: make(chan bool), + barrier: nil, // T.Parallel has no effect when fuzzing. + name: testName, + parent: &root, + level: root.level + 1, + chatty: root.chatty, + ctx: ctx, + cancelCtx: cancelCtx, + }, + fstate: fstate, + tstate: tstate, + } + f.w = indenter{&f.common} + if f.chatty != nil { + f.chatty.Updatef(f.name, "=== RUN %s\n", f.name) + } + go fRunner(f, fuzzTest.Fn) + <-f.signal + if f.chatty != nil { + f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name) + } + return !f.failed +} + +// fRunner wraps a call to a fuzz test and ensures that cleanup functions are +// called and status flags are set. fRunner should be called in its own +// goroutine. To wait for its completion, receive from f.signal. +// +// fRunner is analogous to tRunner, which wraps subtests started with T.Run. +// Unit tests and fuzz tests work a little differently, so for now, these +// functions aren't consolidated. In particular, because there are no F.Run and +// F.Parallel methods, i.e., no fuzz sub-tests or parallel fuzz tests, a few +// simplifications are made. We also require that F.Fuzz, F.Skip, or F.Fail is +// called. 
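+//
+// Both call sites follow the same shape, roughly:
+//
+//	go fRunner(f, fuzzTest.Fn)
+//	<-f.signal // wait for the fuzz test to finish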
+func fRunner(f *F, fn func(*F)) { + // When this goroutine is done, either because runtime.Goexit was called, a + // panic started, or fn returned normally, record the duration and send + // t.signal, indicating the fuzz test is done. + defer func() { + // Detect whether the fuzz test panicked or called runtime.Goexit + // without calling F.Fuzz, F.Fail, or F.Skip. If it did, panic (possibly + // replacing a nil panic value). Nothing should recover after fRunner + // unwinds, so this should crash the process and print stack. + // Unfortunately, recovering here adds stack frames, but the location of + // the original panic should still be + // clear. + f.checkRaces() + if f.Failed() { + numFailed.Add(1) + } + err := recover() + if err == nil { + f.mu.RLock() + fuzzNotCalled := !f.fuzzCalled && !f.skipped && !f.failed + if !f.finished && !f.skipped && !f.failed { + err = errNilPanicOrGoexit + } + f.mu.RUnlock() + if fuzzNotCalled && err == nil { + f.Error("returned without calling F.Fuzz, F.Fail, or F.Skip") + } + } + + // Use a deferred call to ensure that we report that the test is + // complete even if a cleanup function calls F.FailNow. See issue 41355. + didPanic := false + defer func() { + if !didPanic { + // Only report that the test is complete if it doesn't panic, + // as otherwise the test binary can exit before the panic is + // reported to the user. See issue 41479. + f.signal <- true + } + }() + + // If we recovered a panic or inappropriate runtime.Goexit, fail the test, + // flush the output log up to the root, then panic. + doPanic := func(err any) { + f.Fail() + if r := f.runCleanup(recoverAndReturnPanic); r != nil { + f.Logf("cleanup panicked with %v", r) + } + for root := &f.common; root.parent != nil; root = root.parent { + root.mu.Lock() + root.duration += highPrecisionTimeSince(root.start) + d := root.duration + root.mu.Unlock() + root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d)) + } + didPanic = true + panic(err) + } + if err != nil { + doPanic(err) + } + + // No panic or inappropriate Goexit. + f.duration += highPrecisionTimeSince(f.start) + + if len(f.sub) > 0 { + // Unblock inputs that called T.Parallel while running the seed corpus. + // This only affects fuzz tests run as normal tests. + // While fuzzing, T.Parallel has no effect, so f.sub is empty, and this + // branch is not taken. f.barrier is nil in that case. + f.tstate.release() + close(f.barrier) + // Wait for the subtests to complete. + for _, sub := range f.sub { + <-sub.signal + } + cleanupStart := highPrecisionTimeNow() + err := f.runCleanup(recoverAndReturnPanic) + f.duration += highPrecisionTimeSince(cleanupStart) + if err != nil { + doPanic(err) + } + } + + // Report after all subtests have finished. + f.report() + f.done = true + f.setRan() + }() + defer func() { + if len(f.sub) == 0 { + f.runCleanup(normalPanic) + } + }() + + f.start = highPrecisionTimeNow() + f.resetRaces() + fn(f) + + // Code beyond this point will not be executed when FailNow or SkipNow + // is invoked. + f.mu.Lock() + f.finished = true + f.mu.Unlock() +} diff --git a/testing/testing/helper_test.go b/testing/testing/helper_test.go new file mode 100644 index 0000000..1cf8156 --- /dev/null +++ b/testing/testing/helper_test.go @@ -0,0 +1,105 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing_test + +import ( + "os" + "regexp" + "strings" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +func TestTBHelper(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + testTestHelper(t) + + // Check that calling Helper from inside a top-level test function + // has no effect. + t.Helper() + t.Error("8") + return + } + + t.Parallel() + + cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestTBHelper$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + out, _ := cmd.CombinedOutput() + + want := `--- FAIL: TestTBHelper \([^)]+\) + helperfuncs_test.go:15: 0 + helperfuncs_test.go:47: 1 + helperfuncs_test.go:24: 2 + helperfuncs_test.go:49: 3 + helperfuncs_test.go:56: 4 + --- FAIL: TestTBHelper/sub \([^)]+\) + helperfuncs_test.go:59: 5 + helperfuncs_test.go:24: 6 + helperfuncs_test.go:58: 7 + --- FAIL: TestTBHelper/sub2 \([^)]+\) + helperfuncs_test.go:80: 11 + helperfuncs_test.go:84: recover 12 + helperfuncs_test.go:86: GenericFloat64 + helperfuncs_test.go:87: GenericInt + helper_test.go:22: 8 + helperfuncs_test.go:73: 9 + helperfuncs_test.go:69: 10 +` + if !regexp.MustCompile(want).Match(out) { + t.Errorf("got output:\n\n%s\nwant matching:\n\n%s", out, want) + } +} + +func TestTBHelperParallel(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + parallelTestHelper(t) + return + } + + t.Parallel() + + cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestTBHelperParallel$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + out, _ := cmd.CombinedOutput() + + t.Logf("output:\n%s", out) + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + // We expect to see one "--- FAIL" line at the start + // of the log, five lines of "parallel" logging, + // and a final "FAIL" line at the end of the test. + const wantLines = 7 + + if len(lines) != wantLines { + t.Fatalf("parallelTestHelper gave %d lines of output; want %d", len(lines), wantLines) + } + want := "helperfuncs_test.go:24: parallel" + if got := strings.TrimSpace(lines[1]); got != want { + t.Errorf("got second output line %q; want %q", got, want) + } +} + +func BenchmarkTBHelper(b *testing.B) { + f1 := func() { + b.Helper() + } + f2 := func() { + b.Helper() + } + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if i&1 == 0 { + f1() + } else { + f2() + } + } +} diff --git a/testing/testing/helperfuncs_test.go b/testing/testing/helperfuncs_test.go new file mode 100644 index 0000000..f0295f3 --- /dev/null +++ b/testing/testing/helperfuncs_test.go @@ -0,0 +1,124 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "sync" + "testing" +) + +// The line numbering of this file is important for TestTBHelper. 
+ +func notHelper(t *testing.T, msg string) { + t.Error(msg) +} + +func helper(t *testing.T, msg string) { + t.Helper() + t.Error(msg) +} + +func notHelperCallingHelper(t *testing.T, msg string) { + helper(t, msg) +} + +func helperCallingHelper(t *testing.T, msg string) { + t.Helper() + helper(t, msg) +} + +func genericHelper[G any](t *testing.T, msg string) { + t.Helper() + t.Error(msg) +} + +var genericIntHelper = genericHelper[int] + +func testTestHelper(t *testing.T) { + testHelper(t) +} + +func testHelper(t *testing.T) { + // Check combinations of directly and indirectly + // calling helper functions. + notHelper(t, "0") + helper(t, "1") + notHelperCallingHelper(t, "2") + helperCallingHelper(t, "3") + + // Check a function literal closing over t that uses Helper. + fn := func(msg string) { + t.Helper() + t.Error(msg) + } + fn("4") + + t.Run("sub", func(t *testing.T) { + helper(t, "5") + notHelperCallingHelper(t, "6") + // Check that calling Helper from inside a subtest entry function + // works as if it were in an ordinary function call. + t.Helper() + t.Error("7") + }) + + // Check that right caller is reported for func passed to Cleanup when + // multiple cleanup functions have been registered. + t.Cleanup(func() { + t.Helper() + t.Error("10") + }) + t.Cleanup(func() { + t.Helper() + t.Error("9") + }) + + // Check that helper-ness propagates up through subtests + // to helpers above. See https://golang.org/issue/44887. + helperSubCallingHelper(t, "11") + + // Check that helper-ness propagates up through panic/recover. + // See https://golang.org/issue/31154. + recoverHelper(t, "12") + + genericHelper[float64](t, "GenericFloat64") + genericIntHelper(t, "GenericInt") +} + +func parallelTestHelper(t *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + notHelperCallingHelper(t, "parallel") + wg.Done() + }() + } + wg.Wait() +} + +func helperSubCallingHelper(t *testing.T, msg string) { + t.Helper() + t.Run("sub2", func(t *testing.T) { + t.Helper() + t.Fatal(msg) + }) +} + +func recoverHelper(t *testing.T, msg string) { + t.Helper() + defer func() { + t.Helper() + if err := recover(); err != nil { + t.Errorf("recover %s", err) + } + }() + doPanic(t, msg) +} + +func doPanic(t *testing.T, msg string) { + t.Helper() + panic(msg) +} diff --git a/testing/testing/internal/testdeps/deps.go b/testing/testing/internal/testdeps/deps.go new file mode 100644 index 0000000..e27c173 --- /dev/null +++ b/testing/testing/internal/testdeps/deps.go @@ -0,0 +1,241 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package testdeps provides access to dependencies needed by test execution. +// +// This package is imported by the generated main package, which passes +// TestDeps into testing.Main. This allows tests to use packages at run time +// without making those packages direct dependencies of package testing. +// Direct dependencies of package testing are harder to write tests for. +package testdeps + +import ( + "bufio" + "context" + "io" + "os" + "os/signal" + "reflect" + "regexp" + "runtime/pprof" + "strings" + "sync" + "time" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/fuzz" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testlog" +) + +// Cover indicates whether coverage is enabled. +var Cover bool + +// TestDeps is an implementation of the testing.testDeps interface, +// suitable for passing to [testing.MainStart]. 
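+//
+// The main package generated by cmd/go wires it in along these lines
+// (a sketch, not the literal generated file):
+//
+//	m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples)
+//	os.Exit(m.Run())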
+type TestDeps struct{} + +var matchPat string +var matchRe *regexp.Regexp + +func (TestDeps) MatchString(pat, str string) (result bool, err error) { + if matchRe == nil || matchPat != pat { + matchPat = pat + matchRe, err = regexp.Compile(matchPat) + if err != nil { + return + } + } + return matchRe.MatchString(str), nil +} + +func (TestDeps) StartCPUProfile(w io.Writer) error { + return pprof.StartCPUProfile(w) +} + +func (TestDeps) StopCPUProfile() { + pprof.StopCPUProfile() +} + +func (TestDeps) WriteProfileTo(name string, w io.Writer, debug int) error { + return pprof.Lookup(name).WriteTo(w, debug) +} + +// ImportPath is the import path of the testing binary, set by the generated main function. +var ImportPath string + +func (TestDeps) ImportPath() string { + return ImportPath +} + +// testLog implements testlog.Interface, logging actions by package os. +type testLog struct { + mu sync.Mutex + w *bufio.Writer + set bool +} + +func (l *testLog) Getenv(key string) { + l.add("getenv", key) +} + +func (l *testLog) Open(name string) { + l.add("open", name) +} + +func (l *testLog) Stat(name string) { + l.add("stat", name) +} + +func (l *testLog) Chdir(name string) { + l.add("chdir", name) +} + +// add adds the (op, name) pair to the test log. +func (l *testLog) add(op, name string) { + if strings.Contains(name, "\n") || name == "" { + return + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.w == nil { + return + } + l.w.WriteString(op) + l.w.WriteByte(' ') + l.w.WriteString(name) + l.w.WriteByte('\n') +} + +var log testLog + +func (TestDeps) StartTestLog(w io.Writer) { + log.mu.Lock() + log.w = bufio.NewWriter(w) + if !log.set { + // Tests that define TestMain and then run m.Run multiple times + // will call StartTestLog/StopTestLog multiple times. + // Checking log.set avoids calling testlog.SetLogger multiple times + // (which will panic) and also avoids writing the header multiple times. + log.set = true + testlog.SetLogger(&log) + log.w.WriteString("# test log\n") // known to cmd/go/internal/test/test.go + } + log.mu.Unlock() +} + +func (TestDeps) StopTestLog() error { + log.mu.Lock() + defer log.mu.Unlock() + err := log.w.Flush() + log.w = nil + return err +} + +// SetPanicOnExit0 tells the os package whether to panic on os.Exit(0). +func (TestDeps) SetPanicOnExit0(v bool) { + testlog.SetPanicOnExit0(v) +} + +func (TestDeps) CoordinateFuzzing( + timeout time.Duration, + limit int64, + minimizeTimeout time.Duration, + minimizeLimit int64, + parallel int, + seed []fuzz.CorpusEntry, + types []reflect.Type, + corpusDir, + cacheDir string) (err error) { + // Fuzzing may be interrupted with a timeout or if the user presses ^C. + // In either case, we'll stop worker processes gracefully and save + // crashers and interesting values. + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + err = fuzz.CoordinateFuzzing(ctx, fuzz.CoordinateFuzzingOpts{ + Log: os.Stderr, + Timeout: timeout, + Limit: limit, + MinimizeTimeout: minimizeTimeout, + MinimizeLimit: minimizeLimit, + Parallel: parallel, + Seed: seed, + Types: types, + CorpusDir: corpusDir, + CacheDir: cacheDir, + }) + if err == ctx.Err() { + return nil + } + return err +} + +func (TestDeps) RunFuzzWorker(fn func(fuzz.CorpusEntry) error) error { + // Worker processes may or may not receive a signal when the user presses ^C + // On POSIX operating systems, a signal sent to a process group is delivered + // to all processes in that group. This is not the case on Windows. 
+ // If the worker is interrupted, return quickly and without error. + // If only the coordinator process is interrupted, it tells each worker + // process to stop by closing its "fuzz_in" pipe. + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + err := fuzz.RunFuzzWorker(ctx, fn) + if err == ctx.Err() { + return nil + } + return err +} + +func (TestDeps) ReadCorpus(dir string, types []reflect.Type) ([]fuzz.CorpusEntry, error) { + return fuzz.ReadCorpus(dir, types) +} + +func (TestDeps) CheckCorpus(vals []any, types []reflect.Type) error { + return fuzz.CheckCorpus(vals, types) +} + +func (TestDeps) ResetCoverage() { + fuzz.ResetCoverage() +} + +func (TestDeps) SnapshotCoverage() { + fuzz.SnapshotCoverage() +} + +var CoverMode string +var Covered string +var CoverSelectedPackages []string + +// These variables below are set at runtime (via code in testmain) to point +// to the equivalent functions in package internal/coverage/cfile; doing +// things this way allows us to have tests import internal/coverage/cfile +// only when -cover is in effect (as opposed to importing for all tests). +var ( + CoverSnapshotFunc func() float64 + CoverProcessTestDirFunc func(dir string, cfile string, cm string, cpkg string, w io.Writer, selpkgs []string) error + CoverMarkProfileEmittedFunc func(val bool) +) + +func (TestDeps) InitRuntimeCoverage() (mode string, tearDown func(string, string) (string, error), snapcov func() float64) { + if CoverMode == "" { + return + } + return CoverMode, coverTearDown, CoverSnapshotFunc +} + +func coverTearDown(coverprofile string, gocoverdir string) (string, error) { + var err error + if gocoverdir == "" { + gocoverdir, err = os.MkdirTemp("", "gocoverdir") + if err != nil { + return "error setting GOCOVERDIR: bad os.MkdirTemp return", err + } + defer os.RemoveAll(gocoverdir) + } + CoverMarkProfileEmittedFunc(true) + cmode := CoverMode + if err := CoverProcessTestDirFunc(gocoverdir, coverprofile, cmode, Covered, os.Stdout, CoverSelectedPackages); err != nil { + return "error generating coverage report", err + } + return "", nil +} diff --git a/testing/testing/iotest/example_test.go b/testing/testing/iotest/example_test.go new file mode 100644 index 0000000..10f6bd3 --- /dev/null +++ b/testing/testing/iotest/example_test.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest_test + +import ( + "errors" + "fmt" + "testing/iotest" +) + +func ExampleErrReader() { + // A reader that always returns a custom error. + r := iotest.ErrReader(errors.New("custom error")) + n, err := r.Read(nil) + fmt.Printf("n: %d\nerr: %q\n", n, err) + + // Output: + // n: 0 + // err: "custom error" +} diff --git a/testing/testing/iotest/logger.go b/testing/testing/iotest/logger.go new file mode 100644 index 0000000..10d0cb5 --- /dev/null +++ b/testing/testing/iotest/logger.go @@ -0,0 +1,54 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package iotest + +import ( + "io" + "log" +) + +type writeLogger struct { + prefix string + w io.Writer +} + +func (l *writeLogger) Write(p []byte) (n int, err error) { + n, err = l.w.Write(p) + if err != nil { + log.Printf("%s %x: %v", l.prefix, p[0:n], err) + } else { + log.Printf("%s %x", l.prefix, p[0:n]) + } + return +} + +// NewWriteLogger returns a writer that behaves like w except +// that it logs (using [log.Printf]) each write to standard error, +// printing the prefix and the hexadecimal data written. +func NewWriteLogger(prefix string, w io.Writer) io.Writer { + return &writeLogger{prefix, w} +} + +type readLogger struct { + prefix string + r io.Reader +} + +func (l *readLogger) Read(p []byte) (n int, err error) { + n, err = l.r.Read(p) + if err != nil { + log.Printf("%s %x: %v", l.prefix, p[0:n], err) + } else { + log.Printf("%s %x", l.prefix, p[0:n]) + } + return +} + +// NewReadLogger returns a reader that behaves like r except +// that it logs (using [log.Printf]) each read to standard error, +// printing the prefix and the hexadecimal data read. +func NewReadLogger(prefix string, r io.Reader) io.Reader { + return &readLogger{prefix, r} +} diff --git a/testing/testing/iotest/logger_test.go b/testing/testing/iotest/logger_test.go new file mode 100644 index 0000000..7a7d0aa --- /dev/null +++ b/testing/testing/iotest/logger_test.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest + +import ( + "bytes" + "errors" + "fmt" + "log" + "strings" + "testing" +) + +type errWriter struct { + err error +} + +func (w errWriter) Write([]byte) (int, error) { + return 0, w.err +} + +func TestWriteLogger(t *testing.T) { + olw := log.Writer() + olf := log.Flags() + olp := log.Prefix() + + // Revert the original log settings before we exit. + defer func() { + log.SetFlags(olf) + log.SetPrefix(olp) + log.SetOutput(olw) + }() + + lOut := new(strings.Builder) + log.SetPrefix("lw: ") + log.SetOutput(lOut) + log.SetFlags(0) + + lw := new(strings.Builder) + wl := NewWriteLogger("write:", lw) + if _, err := wl.Write([]byte("Hello, World!")); err != nil { + t.Fatalf("Unexpectedly failed to write: %v", err) + } + + if g, w := lw.String(), "Hello, World!"; g != w { + t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } + wantLogWithHex := fmt.Sprintf("lw: write: %x\n", "Hello, World!") + if g, w := lOut.String(), wantLogWithHex; g != w { + t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } +} + +func TestWriteLogger_errorOnWrite(t *testing.T) { + olw := log.Writer() + olf := log.Flags() + olp := log.Prefix() + + // Revert the original log settings before we exit. + defer func() { + log.SetFlags(olf) + log.SetPrefix(olp) + log.SetOutput(olw) + }() + + lOut := new(strings.Builder) + log.SetPrefix("lw: ") + log.SetOutput(lOut) + log.SetFlags(0) + + lw := errWriter{err: errors.New("Write Error!")} + wl := NewWriteLogger("write:", lw) + if _, err := wl.Write([]byte("Hello, World!")); err == nil { + t.Fatalf("Unexpectedly succeeded to write: %v", err) + } + + wantLogWithHex := fmt.Sprintf("lw: write: %x: %v\n", "", "Write Error!") + if g, w := lOut.String(), wantLogWithHex; g != w { + t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } +} + +func TestReadLogger(t *testing.T) { + olw := log.Writer() + olf := log.Flags() + olp := log.Prefix() + + // Revert the original log settings before we exit. 
+ defer func() { + log.SetFlags(olf) + log.SetPrefix(olp) + log.SetOutput(olw) + }() + + lOut := new(strings.Builder) + log.SetPrefix("lr: ") + log.SetOutput(lOut) + log.SetFlags(0) + + data := []byte("Hello, World!") + p := make([]byte, len(data)) + lr := bytes.NewReader(data) + rl := NewReadLogger("read:", lr) + + n, err := rl.Read(p) + if err != nil { + t.Fatalf("Unexpectedly failed to read: %v", err) + } + + if g, w := p[:n], data; !bytes.Equal(g, w) { + t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } + + wantLogWithHex := fmt.Sprintf("lr: read: %x\n", "Hello, World!") + if g, w := lOut.String(), wantLogWithHex; g != w { + t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } +} + +func TestReadLogger_errorOnRead(t *testing.T) { + olw := log.Writer() + olf := log.Flags() + olp := log.Prefix() + + // Revert the original log settings before we exit. + defer func() { + log.SetFlags(olf) + log.SetPrefix(olp) + log.SetOutput(olw) + }() + + lOut := new(strings.Builder) + log.SetPrefix("lr: ") + log.SetOutput(lOut) + log.SetFlags(0) + + data := []byte("Hello, World!") + p := make([]byte, len(data)) + + lr := ErrReader(errors.New("io failure")) + rl := NewReadLogger("read", lr) + n, err := rl.Read(p) + if err == nil { + t.Fatalf("Unexpectedly succeeded to read: %v", err) + } + + wantLogWithHex := fmt.Sprintf("lr: read %x: io failure\n", p[:n]) + if g, w := lOut.String(), wantLogWithHex; g != w { + t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w) + } +} diff --git a/testing/testing/iotest/reader.go b/testing/testing/iotest/reader.go new file mode 100644 index 0000000..8529e1c --- /dev/null +++ b/testing/testing/iotest/reader.go @@ -0,0 +1,268 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package iotest implements Readers and Writers useful mainly for testing. +package iotest + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// OneByteReader returns a Reader that implements +// each non-empty Read by reading one byte from r. +func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} } + +type oneByteReader struct { + r io.Reader +} + +func (r *oneByteReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + return r.r.Read(p[0:1]) +} + +// HalfReader returns a Reader that implements Read +// by reading half as many requested bytes from r. +func HalfReader(r io.Reader) io.Reader { return &halfReader{r} } + +type halfReader struct { + r io.Reader +} + +func (r *halfReader) Read(p []byte) (int, error) { + return r.r.Read(p[0 : (len(p)+1)/2]) +} + +// DataErrReader changes the way errors are handled by a Reader. Normally, a +// Reader returns an error (typically EOF) from the first Read call after the +// last piece of data is read. DataErrReader wraps a Reader and changes its +// behavior so the final error is returned along with the final data, instead +// of in the first call after the final data. +func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} } + +type dataErrReader struct { + r io.Reader + unread []byte + data []byte +} + +func (r *dataErrReader) Read(p []byte) (n int, err error) { + // loop because first call needs two reads: + // one to get data and a second to look for an error. 
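+	// The break condition (n > 0 || err != nil) is what folds the final
+	// error into the same call that returns the last of the data.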
+ for { + if len(r.unread) == 0 { + n1, err1 := r.r.Read(r.data) + r.unread = r.data[0:n1] + err = err1 + } + if n > 0 || err != nil { + break + } + n = copy(p, r.unread) + r.unread = r.unread[n:] + } + return +} + +// ErrTimeout is a fake timeout error. +var ErrTimeout = errors.New("timeout") + +// TimeoutReader returns [ErrTimeout] on the second read +// with no data. Subsequent calls to read succeed. +func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} } + +type timeoutReader struct { + r io.Reader + count int +} + +func (r *timeoutReader) Read(p []byte) (int, error) { + r.count++ + if r.count == 2 { + return 0, ErrTimeout + } + return r.r.Read(p) +} + +// ErrReader returns an [io.Reader] that returns 0, err from all Read calls. +func ErrReader(err error) io.Reader { + return &errReader{err: err} +} + +type errReader struct { + err error +} + +func (r *errReader) Read(p []byte) (int, error) { + return 0, r.err +} + +type smallByteReader struct { + r io.Reader + off int + n int +} + +func (r *smallByteReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + r.n = r.n%3 + 1 + n := r.n + if n > len(p) { + n = len(p) + } + n, err := r.r.Read(p[0:n]) + if err != nil && err != io.EOF { + err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err) + } + r.off += n + return n, err +} + +// TestReader tests that reading from r returns the expected file content. +// It does reads of different sizes, until EOF. +// If r implements [io.ReaderAt] or [io.Seeker], TestReader also checks +// that those operations behave as they should. +// +// If TestReader finds any misbehaviors, it returns an error reporting them. +// The error text may span multiple lines. +func TestReader(r io.Reader, content []byte) error { + if len(content) > 0 { + n, err := r.Read(nil) + if n != 0 || err != nil { + return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err) + } + } + + data, err := io.ReadAll(&smallByteReader{r: r}) + if err != nil { + return err + } + if !bytes.Equal(data, content) { + return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content) + } + n, err := r.Read(make([]byte, 10)) + if n != 0 || err != io.EOF { + return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err) + } + + if r, ok := r.(io.ReadSeeker); ok { + // Seek(0, 1) should report the current file position (EOF). + if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil { + return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content)) + } + + // Seek backward partway through file, in two steps. + // If middle == 0, len(content) == 0, can't use the -1 and +1 seeks. + middle := len(content) - len(content)/3 + if middle > 0 { + if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil { + return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1) + } + if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil { + return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1) + } + if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil { + return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle) + } + } + + // Seek(0, 1) should report the current file position (middle). 
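+		// (Whence values 0, 1, and 2 correspond to io.SeekStart,
+		// io.SeekCurrent, and io.SeekEnd.)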
+ if off, err := r.Seek(0, 1); off != int64(middle) || err != nil { + return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle) + } + + // Reading forward should return the last part of the file. + data, err := io.ReadAll(&smallByteReader{r: r}) + if err != nil { + return fmt.Errorf("ReadAll from offset %d: %v", middle, err) + } + if !bytes.Equal(data, content[middle:]) { + return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:]) + } + + // Seek relative to end of file, but start elsewhere. + if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil { + return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2) + } + if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil { + return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle) + } + + // Reading forward should return the last part of the file (again). + data, err = io.ReadAll(&smallByteReader{r: r}) + if err != nil { + return fmt.Errorf("ReadAll from offset %d: %v", middle, err) + } + if !bytes.Equal(data, content[middle:]) { + return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:]) + } + + // Absolute seek & read forward. + if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil { + return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2) + } + data, err = io.ReadAll(r) + if err != nil { + return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err) + } + if !bytes.Equal(data, content[middle/2:]) { + return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:]) + } + } + + if r, ok := r.(io.ReaderAt); ok { + data := make([]byte, len(content), len(content)+1) + for i := range data { + data[i] = 0xfe + } + n, err := r.ReadAt(data, 0) + if n != len(data) || err != nil && err != io.EOF { + return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data)) + } + if !bytes.Equal(data, content) { + return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content) + } + + n, err = r.ReadAt(data[:1], int64(len(data))) + if n != 0 || err != io.EOF { + return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err) + } + + for i := range data { + data[i] = 0xfe + } + n, err = r.ReadAt(data[:cap(data)], 0) + if n != len(data) || err != io.EOF { + return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data)) + } + if !bytes.Equal(data, content) { + return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content) + } + + for i := range data { + data[i] = 0xfe + } + for i := range data { + n, err = r.ReadAt(data[i:i+1], int64(i)) + if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) { + want := "nil" + if i == len(data)-1 { + want = "nil or EOF" + } + return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want) + } + if data[i] != content[i] { + return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1]) + } + } + } + return nil +} diff --git a/testing/testing/iotest/reader_test.go b/testing/testing/iotest/reader_test.go new file mode 100644 index 0000000..1d22237 --- /dev/null +++ b/testing/testing/iotest/reader_test.go @@ -0,0 +1,261 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" +) + +func TestOneByteReader_nonEmptyReader(t *testing.T) { + msg := "Hello, World!" + buf := new(bytes.Buffer) + buf.WriteString(msg) + + obr := OneByteReader(buf) + var b []byte + n, err := obr.Read(b) + if err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + + b = make([]byte, 3) + // Read from obr until EOF. + got := new(strings.Builder) + for i := 0; ; i++ { + n, err = obr.Read(b) + if err != nil { + break + } + if g, w := n, 1; g != w { + t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w) + } + got.Write(b[:n]) + } + if g, w := err, io.EOF; g != w { + t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := got.String(), "Hello, World!"; g != w { + t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w) + } +} + +func TestOneByteReader_emptyReader(t *testing.T) { + r := new(bytes.Buffer) + + obr := OneByteReader(r) + var b []byte + if n, err := obr.Read(b); err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + + b = make([]byte, 5) + n, err := obr.Read(b) + if g, w := err, io.EOF; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } +} + +func TestHalfReader_nonEmptyReader(t *testing.T) { + msg := "Hello, World!" + buf := new(bytes.Buffer) + buf.WriteString(msg) + // empty read buffer + hr := HalfReader(buf) + var b []byte + n, err := hr.Read(b) + if err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + // non empty read buffer + b = make([]byte, 2) + got := new(strings.Builder) + for i := 0; ; i++ { + n, err = hr.Read(b) + if err != nil { + break + } + if g, w := n, 1; g != w { + t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w) + } + got.Write(b[:n]) + } + if g, w := err, io.EOF; g != w { + t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := got.String(), "Hello, World!"; g != w { + t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w) + } +} + +func TestHalfReader_emptyReader(t *testing.T) { + r := new(bytes.Buffer) + + hr := HalfReader(r) + var b []byte + if n, err := hr.Read(b); err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + + b = make([]byte, 5) + n, err := hr.Read(b) + if g, w := err, io.EOF; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } +} + +func TestTimeOutReader_nonEmptyReader(t *testing.T) { + msg := "Hello, World!" 
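+	// TimeoutReader errors only on the second Read of a given wrapper, so
+	// re-wrapping the same source (tor2 below) starts the count over.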
+ buf := new(bytes.Buffer) + buf.WriteString(msg) + // empty read buffer + tor := TimeoutReader(buf) + var b []byte + n, err := tor.Read(b) + if err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + // Second call should timeout + n, err = tor.Read(b) + if g, w := err, ErrTimeout; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } + // non empty read buffer + tor2 := TimeoutReader(buf) + b = make([]byte, 3) + if n, err := tor2.Read(b); err != nil || n == 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + // Second call should timeout + n, err = tor2.Read(b) + if g, w := err, ErrTimeout; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } +} + +func TestTimeOutReader_emptyReader(t *testing.T) { + r := new(bytes.Buffer) + // empty read buffer + tor := TimeoutReader(r) + var b []byte + if n, err := tor.Read(b); err != nil || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + // Second call should timeout + n, err := tor.Read(b) + if g, w := err, ErrTimeout; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } + // non empty read buffer + tor2 := TimeoutReader(r) + b = make([]byte, 5) + if n, err := tor2.Read(b); err != io.EOF || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + // Second call should timeout + n, err = tor2.Read(b) + if g, w := err, ErrTimeout; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } +} + +func TestDataErrReader_nonEmptyReader(t *testing.T) { + msg := "Hello, World!" + buf := new(bytes.Buffer) + buf.WriteString(msg) + + der := DataErrReader(buf) + + b := make([]byte, 3) + got := new(strings.Builder) + var n int + var err error + for { + n, err = der.Read(b) + got.Write(b[:n]) + if err != nil { + break + } + } + if err != io.EOF || n == 0 { + t.Errorf("Last Read returned n=%d err=%v", n, err) + } + if g, w := got.String(), "Hello, World!"; g != w { + t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w) + } +} + +func TestDataErrReader_emptyReader(t *testing.T) { + r := new(bytes.Buffer) + + der := DataErrReader(r) + var b []byte + if n, err := der.Read(b); err != io.EOF || n != 0 { + t.Errorf("Empty buffer read returned n=%d err=%v", n, err) + } + + b = make([]byte, 5) + n, err := der.Read(b) + if g, w := err, io.EOF; g != w { + t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w) + } + if g, w := n, 0; g != w { + t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w) + } +} + +func TestErrReader(t *testing.T) { + cases := []struct { + name string + err error + }{ + {"nil error", nil}, + {"non-nil error", errors.New("io failure")}, + {"io.EOF", io.EOF}, + } + + for _, tt := range cases { + tt := tt + t.Run(tt.name, func(t *testing.T) { + n, err := ErrReader(tt.err).Read(nil) + if err != tt.err { + t.Fatalf("Error mismatch\nGot: %v\nWant: %v", err, tt.err) + } + if n != 0 { + t.Fatalf("Byte count mismatch: got %d want 0", n) + } + }) + } +} + +func TestStringsReader(t *testing.T) { + const msg = "Now is the time for all good gophers." 
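+	// strings.Reader implements io.Seeker and io.ReaderAt, so this also
+	// exercises TestReader's optional seek and read-at checks.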
+ + r := strings.NewReader(msg) + if err := TestReader(r, []byte(msg)); err != nil { + t.Fatal(err) + } +} diff --git a/testing/testing/iotest/writer.go b/testing/testing/iotest/writer.go new file mode 100644 index 0000000..af61ab8 --- /dev/null +++ b/testing/testing/iotest/writer.go @@ -0,0 +1,35 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest + +import "io" + +// TruncateWriter returns a Writer that writes to w +// but stops silently after n bytes. +func TruncateWriter(w io.Writer, n int64) io.Writer { + return &truncateWriter{w, n} +} + +type truncateWriter struct { + w io.Writer + n int64 +} + +func (t *truncateWriter) Write(p []byte) (n int, err error) { + if t.n <= 0 { + return len(p), nil + } + // real write + n = len(p) + if int64(n) > t.n { + n = int(t.n) + } + n, err = t.w.Write(p[0:n]) + t.n -= int64(n) + if err == nil { + n = len(p) + } + return +} diff --git a/testing/testing/iotest/writer_test.go b/testing/testing/iotest/writer_test.go new file mode 100644 index 0000000..2762513 --- /dev/null +++ b/testing/testing/iotest/writer_test.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package iotest + +import ( + "strings" + "testing" +) + +var truncateWriterTests = []struct { + in string + want string + trunc int64 + n int +}{ + {"hello", "", -1, 5}, + {"world", "", 0, 5}, + {"abcde", "abc", 3, 5}, + {"edcba", "edcba", 7, 5}, +} + +func TestTruncateWriter(t *testing.T) { + for _, tt := range truncateWriterTests { + buf := new(strings.Builder) + tw := TruncateWriter(buf, tt.trunc) + n, err := tw.Write([]byte(tt.in)) + if err != nil { + t.Errorf("Unexpected error %v for\n\t%+v", err, tt) + } + if g, w := buf.String(), tt.want; g != w { + t.Errorf("got %q, expected %q", g, w) + } + if g, w := n, tt.n; g != w { + t.Errorf("read %d bytes, but expected to have read %d bytes for\n\t%+v", g, w, tt) + } + } +} diff --git a/testing/testing/loop_test.go b/testing/testing/loop_test.go new file mode 100644 index 0000000..743cbe6 --- /dev/null +++ b/testing/testing/loop_test.go @@ -0,0 +1,154 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "bytes" + "strings" +) + +// See also TestBenchmarkBLoop* in other files. + +func TestBenchmarkBLoop(t *T) { + var initialStart highPrecisionTime + var firstStart highPrecisionTime + var scaledStart highPrecisionTime + var runningEnd bool + runs := 0 + iters := 0 + firstBN := 0 + restBN := 0 + finalBN := 0 + bRet := Benchmark(func(b *B) { + initialStart = b.start + runs++ + for b.Loop() { + if iters == 0 { + firstStart = b.start + firstBN = b.N + } else { + restBN = max(restBN, b.N) + } + if iters == 1 { + scaledStart = b.start + } + iters++ + } + finalBN = b.N + runningEnd = b.timerOn + }) + // Verify that a b.Loop benchmark is invoked just once. + if runs != 1 { + t.Errorf("want runs == 1, got %d", runs) + } + // Verify that at least one iteration ran. + if iters == 0 { + t.Fatalf("no iterations ran") + } + // Verify that b.N, bRet.N, and the b.Loop() iteration count match. 
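+	// (b.Loop counts iterations internally and only publishes the total
+	// to b.N once the loop finishes, which is what is asserted here.)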
+ if finalBN != iters || bRet.N != iters { + t.Errorf("benchmark iterations mismatch: %d loop iterations, final b.N=%d, bRet.N=%d", iters, finalBN, bRet.N) + } + // Verify that b.N was 0 inside the loop + if firstBN != 0 { + t.Errorf("want b.N == 0 on first iteration, got %d", firstBN) + } + if restBN != 0 { + t.Errorf("want b.N == 0 on subsequent iterations, got %d", restBN) + } + // Make sure the benchmark ran for an appropriate amount of time. + if bRet.T < benchTime.d { + t.Fatalf("benchmark ran for %s, want >= %s", bRet.T, benchTime.d) + } + // Verify that the timer is reset on the first loop, and then left alone. + if firstStart == initialStart { + t.Errorf("b.Loop did not reset the timer") + } + if scaledStart != firstStart { + t.Errorf("b.Loop stops and restarts the timer during iteration") + } + // Verify that it stopped the timer after the last loop. + if runningEnd { + t.Errorf("timer was still running after last iteration") + } +} + +func TestBenchmarkBLoopBreak(t *T) { + var bState *B + var bLog bytes.Buffer + bRet := Benchmark(func(b *B) { + // The Benchmark function provides no access to the failure state and + // discards the log, so capture the B and save its log. + bState = b + b.common.w = &bLog + + for i := 0; b.Loop(); i++ { + if i == 2 { + break + } + } + }) + if !bState.failed { + t.Errorf("benchmark should have failed") + } + const wantLog = "benchmark function returned without B.Loop" + if log := bLog.String(); !strings.Contains(log, wantLog) { + t.Errorf("missing error %q in output:\n%s", wantLog, log) + } + // A benchmark that exits early should not report its target iteration count + // because it's not meaningful. + if bRet.N != 0 { + t.Errorf("want N == 0, got %d", bRet.N) + } +} + +func TestBenchmarkBLoopError(t *T) { + // Test that a benchmark that exits early because of an error doesn't *also* + // complain that the benchmark exited early. + var bState *B + var bLog bytes.Buffer + bRet := Benchmark(func(b *B) { + bState = b + b.common.w = &bLog + + for i := 0; b.Loop(); i++ { + b.Error("error") + return + } + }) + if !bState.failed { + t.Errorf("benchmark should have failed") + } + const noWantLog = "benchmark function returned without B.Loop" + if log := bLog.String(); strings.Contains(log, noWantLog) { + t.Errorf("unexpected error %q in output:\n%s", noWantLog, log) + } + if bRet.N != 0 { + t.Errorf("want N == 0, got %d", bRet.N) + } +} + +func TestBenchmarkBLoopStop(t *T) { + var bState *B + var bLog bytes.Buffer + bRet := Benchmark(func(b *B) { + bState = b + b.common.w = &bLog + + for i := 0; b.Loop(); i++ { + b.StopTimer() + } + }) + if !bState.failed { + t.Errorf("benchmark should have failed") + } + const wantLog = "B.Loop called with timer stopped" + if log := bLog.String(); !strings.Contains(log, wantLog) { + t.Errorf("missing error %q in output:\n%s", wantLog, log) + } + if bRet.N != 0 { + t.Errorf("want N == 0, got %d", bRet.N) + } +} diff --git a/testing/testing/match.go b/testing/testing/match.go new file mode 100644 index 0000000..84804dc --- /dev/null +++ b/testing/testing/match.go @@ -0,0 +1,317 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "fmt" + "os" + "strconv" + "strings" + "sync" +) + +// matcher sanitizes, uniques, and filters names of subtests and subbenchmarks. 
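+//
+// For example, calling t.Run("a", ...) twice under the same parent yields
+// the unique names "parent/a" and "parent/a#01".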
+type matcher struct { + filter filterMatch + skip filterMatch + matchFunc func(pat, str string) (bool, error) + + mu sync.Mutex + + // subNames is used to deduplicate subtest names. + // Each key is the subtest name joined to the deduplicated name of the parent test. + // Each value is the count of the number of occurrences of the given subtest name + // already seen. + subNames map[string]int32 +} + +type filterMatch interface { + // matches checks the name against the receiver's pattern strings using the + // given match function. + matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) + + // verify checks that the receiver's pattern strings are valid filters by + // calling the given match function. + verify(name string, matchString func(pat, str string) (bool, error)) error +} + +// simpleMatch matches a test name if all of the pattern strings match in +// sequence. +type simpleMatch []string + +// alternationMatch matches a test name if one of the alternations match. +type alternationMatch []filterMatch + +// TODO: fix test_main to avoid race and improve caching, also allowing to +// eliminate this Mutex. +var matchMutex sync.Mutex + +func allMatcher() *matcher { + return newMatcher(nil, "", "", "") +} + +func newMatcher(matchString func(pat, str string) (bool, error), patterns, name, skips string) *matcher { + var filter, skip filterMatch + if patterns == "" { + filter = simpleMatch{} // always partial true + } else { + filter = splitRegexp(patterns) + if err := filter.verify(name, matchString); err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for %s\n", err) + os.Exit(1) + } + } + if skips == "" { + skip = alternationMatch{} // always false + } else { + skip = splitRegexp(skips) + if err := skip.verify("-test.skip", matchString); err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp for %v\n", err) + os.Exit(1) + } + } + return &matcher{ + filter: filter, + skip: skip, + matchFunc: matchString, + subNames: map[string]int32{}, + } +} + +func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) { + name = subname + + m.mu.Lock() + defer m.mu.Unlock() + + if c != nil && c.level > 0 { + name = m.unique(c.name, rewrite(subname)) + } + + matchMutex.Lock() + defer matchMutex.Unlock() + + // We check the full array of paths each time to allow for the case that a pattern contains a '/'. + elem := strings.Split(name, "/") + + // filter must match. + // accept partial match that may produce full match later. + ok, partial = m.filter.matches(elem, m.matchFunc) + if !ok { + return name, false, false + } + + // skip must not match. + // ignore partial match so we can get to more precise match later. + skip, partialSkip := m.skip.matches(elem, m.matchFunc) + if skip && !partialSkip { + return name, false, false + } + + return name, ok, partial +} + +// clearSubNames clears the matcher's internal state, potentially freeing +// memory. After this is called, T.Name may return the same strings as it did +// for earlier subtests. 
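+// It is called while fuzzing, where duplicate subtest names accumulate
+// quickly and cannot be re-run deterministically anyway.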
+func (m *matcher) clearSubNames() { + m.mu.Lock() + defer m.mu.Unlock() + clear(m.subNames) +} + +func (m simpleMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) { + for i, s := range name { + if i >= len(m) { + break + } + if ok, _ := matchString(m[i], s); !ok { + return false, false + } + } + return true, len(name) < len(m) +} + +func (m simpleMatch) verify(name string, matchString func(pat, str string) (bool, error)) error { + for i, s := range m { + m[i] = rewrite(s) + } + // Verify filters before doing any processing. + for i, s := range m { + if _, err := matchString(s, "non-empty"); err != nil { + return fmt.Errorf("element %d of %s (%q): %s", i, name, s, err) + } + } + return nil +} + +func (m alternationMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) { + for _, m := range m { + if ok, partial = m.matches(name, matchString); ok { + return ok, partial + } + } + return false, false +} + +func (m alternationMatch) verify(name string, matchString func(pat, str string) (bool, error)) error { + for i, m := range m { + if err := m.verify(name, matchString); err != nil { + return fmt.Errorf("alternation %d of %s", i, err) + } + } + return nil +} + +func splitRegexp(s string) filterMatch { + a := make(simpleMatch, 0, strings.Count(s, "/")) + b := make(alternationMatch, 0, strings.Count(s, "|")) + cs := 0 + cp := 0 + for i := 0; i < len(s); { + switch s[i] { + case '[': + cs++ + case ']': + if cs--; cs < 0 { // An unmatched ']' is legal. + cs = 0 + } + case '(': + if cs == 0 { + cp++ + } + case ')': + if cs == 0 { + cp-- + } + case '\\': + i++ + case '/': + if cs == 0 && cp == 0 { + a = append(a, s[:i]) + s = s[i+1:] + i = 0 + continue + } + case '|': + if cs == 0 && cp == 0 { + a = append(a, s[:i]) + s = s[i+1:] + i = 0 + b = append(b, a) + a = make(simpleMatch, 0, len(a)) + continue + } + } + i++ + } + + a = append(a, s) + if len(b) == 0 { + return a + } + return append(b, a) +} + +// unique creates a unique name for the given parent and subname by affixing it +// with one or more counts, if necessary. +func (m *matcher) unique(parent, subname string) string { + base := parent + "/" + subname + + for { + n := m.subNames[base] + if n < 0 { + panic("subtest count overflow") + } + m.subNames[base] = n + 1 + + if n == 0 && subname != "" { + prefix, nn := parseSubtestNumber(base) + if len(prefix) < len(base) && nn < m.subNames[prefix] { + // This test is explicitly named like "parent/subname#NN", + // and #NN was already used for the NNth occurrence of "parent/subname". + // Loop to add a disambiguating suffix. + continue + } + return base + } + + name := fmt.Sprintf("%s#%02d", base, n) + if m.subNames[name] != 0 { + // This is the nth occurrence of base, but the name "parent/subname#NN" + // collides with the first occurrence of a subtest *explicitly* named + // "parent/subname#NN". Try the next number. + continue + } + + return name + } +} + +// parseSubtestNumber splits a subtest name into a "#%02d"-formatted int32 +// suffix (if present), and a prefix preceding that suffix (always). +func parseSubtestNumber(s string) (prefix string, nn int32) { + i := strings.LastIndex(s, "#") + if i < 0 { + return s, 0 + } + + prefix, suffix := s[:i], s[i+1:] + if len(suffix) < 2 || (len(suffix) > 2 && suffix[0] == '0') { + // Even if suffix is numeric, it is not a possible output of a "%02" format + // string: it has either too few digits or too many leading zeroes. 
+ return s, 0 + } + if suffix == "00" { + if !strings.HasSuffix(prefix, "/") { + // We only use "#00" as a suffix for subtests named with the empty + // string — it isn't a valid suffix if the subtest name is non-empty. + return s, 0 + } + } + + n, err := strconv.ParseInt(suffix, 10, 32) + if err != nil || n < 0 { + return s, 0 + } + return prefix, int32(n) +} + +// rewrite rewrites a subname to having only printable characters and no white +// space. +func rewrite(s string) string { + b := []byte{} + for _, r := range s { + switch { + case isSpace(r): + b = append(b, '_') + case !strconv.IsPrint(r): + s := strconv.QuoteRune(r) + b = append(b, s[1:len(s)-1]...) + default: + b = append(b, string(r)...) + } + } + return string(b) +} + +func isSpace(r rune) bool { + if r < 0x2000 { + switch r { + // Note: not the same as Unicode Z class. + case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0, 0x1680: + return true + } + } else { + if r <= 0x200a { + return true + } + switch r { + case 0x2028, 0x2029, 0x202f, 0x205f, 0x3000: + return true + } + } + return false +} diff --git a/testing/testing/match_test.go b/testing/testing/match_test.go new file mode 100644 index 0000000..d31efbc --- /dev/null +++ b/testing/testing/match_test.go @@ -0,0 +1,263 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "unicode" +) + +func init() { + testingTesting = true +} + +// Verify that our IsSpace agrees with unicode.IsSpace. +func TestIsSpace(t *T) { + n := 0 + for r := rune(0); r <= unicode.MaxRune; r++ { + if isSpace(r) != unicode.IsSpace(r) { + t.Errorf("IsSpace(%U)=%t incorrect", r, isSpace(r)) + n++ + if n > 10 { + return + } + } + } +} + +func TestSplitRegexp(t *T) { + res := func(s ...string) filterMatch { return simpleMatch(s) } + alt := func(m ...filterMatch) filterMatch { return alternationMatch(m) } + testCases := []struct { + pattern string + result filterMatch + }{ + // Correct patterns + // If a regexp pattern is correct, all split regexps need to be correct + // as well. + {"", res("")}, + {"/", res("", "")}, + {"//", res("", "", "")}, + {"A", res("A")}, + {"A/B", res("A", "B")}, + {"A/B/", res("A", "B", "")}, + {"/A/B/", res("", "A", "B", "")}, + {"[A]/(B)", res("[A]", "(B)")}, + {"[/]/[/]", res("[/]", "[/]")}, + {"[/]/[:/]", res("[/]", "[:/]")}, + {"/]", res("", "]")}, + {"]/", res("]", "")}, + {"]/[/]", res("]", "[/]")}, + {`([)/][(])`, res(`([)/][(])`)}, + {"[(]/[)]", res("[(]", "[)]")}, + + {"A/B|C/D", alt(res("A", "B"), res("C", "D"))}, + + // Faulty patterns + // Errors in original should produce at least one faulty regexp in results. + {")/", res(")/")}, + {")/(/)", res(")/(", ")")}, + {"a[/)b", res("a[/)b")}, + {"(/]", res("(/]")}, + {"(/", res("(/")}, + {"[/]/[/", res("[/]", "[/")}, + {`\p{/}`, res(`\p{`, "}")}, + {`\p/`, res(`\p`, "")}, + {`[[:/:]]`, res(`[[:/:]]`)}, + } + for _, tc := range testCases { + a := splitRegexp(tc.pattern) + if !reflect.DeepEqual(a, tc.result) { + t.Errorf("splitRegexp(%q) = %#v; want %#v", tc.pattern, a, tc.result) + } + + // If there is any error in the pattern, one of the returned subpatterns + // needs to have an error as well. 
+ if _, err := regexp.Compile(tc.pattern); err != nil { + ok := true + if err := a.verify("", regexp.MatchString); err != nil { + ok = false + } + if ok { + t.Errorf("%s: expected error in any of %q", tc.pattern, a) + } + } + } +} + +func TestMatcher(t *T) { + testCases := []struct { + pattern string + skip string + parent, sub string + ok bool + partial bool + }{ + // Behavior without subtests. + {"", "", "", "TestFoo", true, false}, + {"TestFoo", "", "", "TestFoo", true, false}, + {"TestFoo/", "", "", "TestFoo", true, true}, + {"TestFoo/bar/baz", "", "", "TestFoo", true, true}, + {"TestFoo", "", "", "TestBar", false, false}, + {"TestFoo/", "", "", "TestBar", false, false}, + {"TestFoo/bar/baz", "", "", "TestBar/bar/baz", false, false}, + {"", "TestBar", "", "TestFoo", true, false}, + {"", "TestBar", "", "TestBar", false, false}, + + // Skipping a non-existent test doesn't change anything. + {"", "TestFoo/skipped", "", "TestFoo", true, false}, + {"TestFoo", "TestFoo/skipped", "", "TestFoo", true, false}, + {"TestFoo/", "TestFoo/skipped", "", "TestFoo", true, true}, + {"TestFoo/bar/baz", "TestFoo/skipped", "", "TestFoo", true, true}, + {"TestFoo", "TestFoo/skipped", "", "TestBar", false, false}, + {"TestFoo/", "TestFoo/skipped", "", "TestBar", false, false}, + {"TestFoo/bar/baz", "TestFoo/skipped", "", "TestBar/bar/baz", false, false}, + + // with subtests + {"", "", "TestFoo", "x", true, false}, + {"TestFoo", "", "TestFoo", "x", true, false}, + {"TestFoo/", "", "TestFoo", "x", true, false}, + {"TestFoo/bar/baz", "", "TestFoo", "bar", true, true}, + + {"", "TestFoo/skipped", "TestFoo", "x", true, false}, + {"TestFoo", "TestFoo/skipped", "TestFoo", "x", true, false}, + {"TestFoo", "TestFoo/skipped", "TestFoo", "skipped", false, false}, + {"TestFoo/", "TestFoo/skipped", "TestFoo", "x", true, false}, + {"TestFoo/bar/baz", "TestFoo/skipped", "TestFoo", "bar", true, true}, + + // Subtest with a '/' in its name still allows for copy and pasted names + // to match. 
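+		// For instance, -run=TestFoo/bar/baz matches a subtest of TestFoo
+		// literally named "bar/baz": fullName re-splits the joined name on
+		// '/' before matching, so a copied full name still selects the test.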
+ {"TestFoo/bar/baz", "", "TestFoo", "bar/baz", true, false}, + {"TestFoo/bar/baz", "TestFoo/bar/baz", "TestFoo", "bar/baz", false, false}, + {"TestFoo/bar/baz", "TestFoo/bar/baz/skip", "TestFoo", "bar/baz", true, false}, + {"TestFoo/bar/baz", "", "TestFoo/bar", "baz", true, false}, + {"TestFoo/bar/baz", "", "TestFoo", "x", false, false}, + {"TestFoo", "", "TestBar", "x", false, false}, + {"TestFoo/", "", "TestBar", "x", false, false}, + {"TestFoo/bar/baz", "", "TestBar", "x/bar/baz", false, false}, + + {"A/B|C/D", "", "TestA", "B", true, false}, + {"A/B|C/D", "", "TestC", "D", true, false}, + {"A/B|C/D", "", "TestA", "C", false, false}, + + // subtests only + {"", "", "TestFoo", "x", true, false}, + {"/", "", "TestFoo", "x", true, false}, + {"./", "", "TestFoo", "x", true, false}, + {"./.", "", "TestFoo", "x", true, false}, + {"/bar/baz", "", "TestFoo", "bar", true, true}, + {"/bar/baz", "", "TestFoo", "bar/baz", true, false}, + {"//baz", "", "TestFoo", "bar/baz", true, false}, + {"//", "", "TestFoo", "bar/baz", true, false}, + {"/bar/baz", "", "TestFoo/bar", "baz", true, false}, + {"//foo", "", "TestFoo", "bar/baz", false, false}, + {"/bar/baz", "", "TestFoo", "x", false, false}, + {"/bar/baz", "", "TestBar", "x/bar/baz", false, false}, + } + + for _, tc := range testCases { + m := newMatcher(regexp.MatchString, tc.pattern, "-test.run", tc.skip) + + parent := &common{name: tc.parent} + if tc.parent != "" { + parent.level = 1 + } + if n, ok, partial := m.fullName(parent, tc.sub); ok != tc.ok || partial != tc.partial { + t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v partial %v; want ok %v partial %v", + tc.pattern, tc.parent, tc.sub, n, ok, partial, tc.ok, tc.partial) + } + } +} + +var namingTestCases = []struct{ name, want string }{ + // Uniqueness + {"", "x/#00"}, + {"", "x/#01"}, + {"#0", "x/#0"}, // Doesn't conflict with #00 because the number of digits differs. + {"#00", "x/#00#01"}, // Conflicts with implicit #00 (used above), so add a suffix. + {"#", "x/#"}, + {"#", "x/##01"}, + + {"t", "x/t"}, + {"t", "x/t#01"}, + {"t", "x/t#02"}, + {"t#00", "x/t#00"}, // Explicit "#00" doesn't conflict with the unsuffixed first subtest. + + {"a#01", "x/a#01"}, // user has subtest with this name. + {"a", "x/a"}, // doesn't conflict with this name. + {"a", "x/a#02"}, // This string is claimed now, so resume + {"a", "x/a#03"}, // with counting. + {"a#02", "x/a#02#01"}, // We already used a#02 once, so add a suffix. + + {"b#00", "x/b#00"}, + {"b", "x/b"}, // Implicit 0 doesn't conflict with explicit "#00". + {"b", "x/b#01"}, + {"b#9223372036854775807", "x/b#9223372036854775807"}, // MaxInt64 + {"b", "x/b#02"}, + {"b", "x/b#03"}, + + // Sanitizing + {"A:1 B:2", "x/A:1_B:2"}, + {"s\t\r\u00a0", "x/s___"}, + {"\x01", `x/\x01`}, + {"\U0010ffff", `x/\U0010ffff`}, +} + +func TestNaming(t *T) { + m := newMatcher(regexp.MatchString, "", "", "") + parent := &common{name: "x", level: 1} // top-level test. + + for i, tc := range namingTestCases { + if got, _, _ := m.fullName(parent, tc.name); got != tc.want { + t.Errorf("%d:%s: got %q; want %q", i, tc.name, got, tc.want) + } + } +} + +func FuzzNaming(f *F) { + for _, tc := range namingTestCases { + f.Add(tc.name) + } + parent := &common{name: "x", level: 1} + var m *matcher + var seen map[string]string + reset := func() { + m = allMatcher() + seen = make(map[string]string) + } + reset() + + f.Fuzz(func(t *T, subname string) { + if len(subname) > 10 { + // Long names attract the OOM killer. 
+ t.Skip() + } + name := m.unique(parent.name, subname) + if !strings.Contains(name, "/"+subname) { + t.Errorf("name %q does not contain subname %q", name, subname) + } + if prev, ok := seen[name]; ok { + t.Errorf("name %q generated by both %q and %q", name, prev, subname) + } + if len(seen) > 1e6 { + // Free up memory. + reset() + } + seen[name] = subname + }) +} + +// GoString returns a string that is more readable than the default, which makes +// it easier to read test errors. +func (m alternationMatch) GoString() string { + s := make([]string, len(m)) + for i, m := range m { + s[i] = fmt.Sprintf("%#v", m) + } + return fmt.Sprintf("(%s)", strings.Join(s, " | ")) +} diff --git a/testing/testing/newcover.go b/testing/testing/newcover.go new file mode 100644 index 0000000..03988a1 --- /dev/null +++ b/testing/testing/newcover.go @@ -0,0 +1,57 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Support for test coverage with redesigned coverage implementation. + +package testing + +import ( + "fmt" + "os" + _ "unsafe" // for linkname + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/goexperiment" +) + +// cover2 variable stores the current coverage mode and a +// tear-down function to be called at the end of the testing run. +var cover2 struct { + mode string + tearDown func(coverprofile string, gocoverdir string) (string, error) + snapshotcov func() float64 +} + +// registerCover2 is invoked during "go test -cover" runs. +// It is used to record a 'tear down' function +// (to be called when the test is complete) and the coverage mode. +func registerCover2(mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) { + if mode == "" { + return + } + cover2.mode = mode + cover2.tearDown = tearDown + cover2.snapshotcov = snapcov +} + +// coverReport2 invokes a callback in _testmain.go that will +// emit coverage data at the point where test execution is complete, +// for "go test -cover" runs. +func coverReport2() { + if !goexperiment.CoverageRedesign { + panic("unexpected") + } + if errmsg, err := cover2.tearDown(*coverProfile, *gocoverdir); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", errmsg, err) + os.Exit(2) + } +} + +// coverage2 returns a rough "coverage percentage so far" +// number to support the testing.Coverage() function. +func coverage2() float64 { + if cover2.mode == "" { + return 0.0 + } + return cover2.snapshotcov() +} diff --git a/testing/testing/panic_test.go b/testing/testing/panic_test.go new file mode 100644 index 0000000..48b0556 --- /dev/null +++ b/testing/testing/panic_test.go @@ -0,0 +1,268 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testing_test + +import ( + "flag" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strings" + "testing" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +var testPanicTest = flag.String("test_panic_test", "", "TestPanic: indicates which test should panic") +var testPanicParallel = flag.Bool("test_panic_parallel", false, "TestPanic: run subtests in parallel") +var testPanicCleanup = flag.Bool("test_panic_cleanup", false, "TestPanic: indicates whether test should call Cleanup") +var testPanicCleanupPanic = flag.String("test_panic_cleanup_panic", "", "TestPanic: indicate whether test should call Cleanup function that panics") + +func TestPanic(t *testing.T) { + testenv.MustHaveExec(t) + + testCases := []struct { + desc string + flags []string + want string + }{{ + desc: "root test panics", + flags: []string{"-test_panic_test=TestPanicHelper"}, + want: ` +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper +`, + }, { + desc: "subtest panics", + flags: []string{"-test_panic_test=TestPanicHelper/1"}, + want: ` +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "subtest panics with cleanup", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "subtest panics with outer cleanup panic", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper +`, + }, { + desc: "subtest panics with middle cleanup panic", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "subtest panics with inner cleanup panic", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "parallel subtest panics with cleanup", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_parallel"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "parallel subtest panics with outer cleanup panic", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer", "-test_panic_parallel"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper +`, + }, { + desc: "parallel subtest panics with middle cleanup panic", + flags: 
[]string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle", "-test_panic_parallel"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }, { + desc: "parallel subtest panics with inner cleanup panic", + flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner", "-test_panic_parallel"}, + want: ` +ran inner cleanup 1 +ran middle cleanup 1 +ran outer cleanup +--- FAIL: TestPanicHelper (N.NNs) + panic_test.go:NNN: TestPanicHelper + --- FAIL: TestPanicHelper/1 (N.NNs) + panic_test.go:NNN: TestPanicHelper/1 +`, + }} + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + cmd := exec.Command(os.Args[0], "-test.run=^TestPanicHelper$") + cmd.Args = append(cmd.Args, tc.flags...) + cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") + b, _ := cmd.CombinedOutput() + got := string(b) + want := strings.TrimSpace(tc.want) + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want) + } + }) + } +} + +func makeRegexp(s string) string { + s = regexp.QuoteMeta(s) + s = strings.ReplaceAll(s, ":NNN:", `:\d+:`) + s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`) + return s +} + +func TestPanicHelper(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + t.Log(t.Name()) + if t.Name() == *testPanicTest { + panic("panic") + } + switch *testPanicCleanupPanic { + case "", "outer", "middle", "inner": + default: + t.Fatalf("bad -test_panic_cleanup_panic: %s", *testPanicCleanupPanic) + } + t.Cleanup(func() { + fmt.Println("ran outer cleanup") + if *testPanicCleanupPanic == "outer" { + panic("outer cleanup") + } + }) + for i := 0; i < 3; i++ { + i := i + t.Run(fmt.Sprintf("%v", i), func(t *testing.T) { + chosen := t.Name() == *testPanicTest + if chosen && *testPanicCleanup { + t.Cleanup(func() { + fmt.Printf("ran middle cleanup %d\n", i) + if *testPanicCleanupPanic == "middle" { + panic("middle cleanup") + } + }) + } + if chosen && *testPanicParallel { + t.Parallel() + } + t.Log(t.Name()) + if chosen { + if *testPanicCleanup { + t.Cleanup(func() { + fmt.Printf("ran inner cleanup %d\n", i) + if *testPanicCleanupPanic == "inner" { + panic("inner cleanup") + } + }) + } + panic("panic") + } + }) + } +} + +func TestMorePanic(t *testing.T) { + testenv.MustHaveExec(t) + + testCases := []struct { + desc string + flags []string + want string + }{ + { + desc: "Issue 48502: call runtime.Goexit in t.Cleanup after panic", + flags: []string{"-test.run=^TestGoexitInCleanupAfterPanicHelper$"}, + want: `panic: die + panic: test executed panic(nil) or runtime.Goexit`, + }, + { + desc: "Issue 48515: call t.Run in t.Cleanup should trigger panic", + flags: []string{"-test.run=^TestCallRunInCleanupHelper$"}, + want: `panic: testing: t.Run called during t.Cleanup`, + }, + } + + for _, tc := range testCases { + cmd := exec.Command(os.Args[0], tc.flags...) 
+ cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1") + b, _ := cmd.CombinedOutput() + got := string(b) + want := tc.want + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want) + } + } +} + +func TestCallRunInCleanupHelper(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + t.Cleanup(func() { + t.Run("in-cleanup", func(t *testing.T) { + t.Log("must not be executed") + }) + }) +} + +func TestGoexitInCleanupAfterPanicHelper(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + t.Cleanup(func() { runtime.Goexit() }) + t.Parallel() + panic("die") +} diff --git a/testing/testing/quick/quick.go b/testing/testing/quick/quick.go new file mode 100644 index 0000000..8ef9cf7 --- /dev/null +++ b/testing/testing/quick/quick.go @@ -0,0 +1,385 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package quick implements utility functions to help with black box testing. +// +// The testing/quick package is frozen and is not accepting new features. +package quick + +import ( + "flag" + "fmt" + "math" + "math/rand" + "reflect" + "strings" + "time" +) + +var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check") + +// A Generator can generate random values of its own type. +type Generator interface { + // Generate returns a random instance of the type on which it is a + // method using the size as a size hint. + Generate(rand *rand.Rand, size int) reflect.Value +} + +// randFloat32 generates a random float taking the full range of a float32. +func randFloat32(rand *rand.Rand) float32 { + f := rand.Float64() * math.MaxFloat32 + if rand.Int()&1 == 1 { + f = -f + } + return float32(f) +} + +// randFloat64 generates a random float taking the full range of a float64. +func randFloat64(rand *rand.Rand) float64 { + f := rand.Float64() * math.MaxFloat64 + if rand.Int()&1 == 1 { + f = -f + } + return f +} + +// randInt64 returns a random int64. +func randInt64(rand *rand.Rand) int64 { + return int64(rand.Uint64()) +} + +// complexSize is the maximum length of arbitrary values that contain other +// values. +const complexSize = 50 + +// Value returns an arbitrary value of the given type. +// If the type implements the [Generator] interface, that will be used. +// Note: To create arbitrary values for structs, all the fields must be exported. +func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) { + return sizedValue(t, rand, complexSize) +} + +// sizedValue returns an arbitrary value of the given type. The size +// hint is used for shrinking as a function of indirection level so +// that recursive data structures will terminate. 
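+// For example, a generated slice of length n passes a hint of size-n to each
+// of its elements, and a struct divides the hint evenly among its fields.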
+func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) { + if m, ok := reflect.Zero(t).Interface().(Generator); ok { + return m.Generate(rand, size), true + } + + v := reflect.New(t).Elem() + switch concrete := t; concrete.Kind() { + case reflect.Bool: + v.SetBool(rand.Int()&1 == 0) + case reflect.Float32: + v.SetFloat(float64(randFloat32(rand))) + case reflect.Float64: + v.SetFloat(randFloat64(rand)) + case reflect.Complex64: + v.SetComplex(complex(float64(randFloat32(rand)), float64(randFloat32(rand)))) + case reflect.Complex128: + v.SetComplex(complex(randFloat64(rand), randFloat64(rand))) + case reflect.Int16: + v.SetInt(randInt64(rand)) + case reflect.Int32: + v.SetInt(randInt64(rand)) + case reflect.Int64: + v.SetInt(randInt64(rand)) + case reflect.Int8: + v.SetInt(randInt64(rand)) + case reflect.Int: + v.SetInt(randInt64(rand)) + case reflect.Uint16: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint32: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint64: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint8: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uint: + v.SetUint(uint64(randInt64(rand))) + case reflect.Uintptr: + v.SetUint(uint64(randInt64(rand))) + case reflect.Map: + numElems := rand.Intn(size) + v.Set(reflect.MakeMap(concrete)) + for i := 0; i < numElems; i++ { + key, ok1 := sizedValue(concrete.Key(), rand, size) + value, ok2 := sizedValue(concrete.Elem(), rand, size) + if !ok1 || !ok2 { + return reflect.Value{}, false + } + v.SetMapIndex(key, value) + } + case reflect.Pointer: + if rand.Intn(size) == 0 { + v.SetZero() // Generate nil pointer. + } else { + elem, ok := sizedValue(concrete.Elem(), rand, size) + if !ok { + return reflect.Value{}, false + } + v.Set(reflect.New(concrete.Elem())) + v.Elem().Set(elem) + } + case reflect.Slice: + numElems := rand.Intn(size) + sizeLeft := size - numElems + v.Set(reflect.MakeSlice(concrete, numElems, numElems)) + for i := 0; i < numElems; i++ { + elem, ok := sizedValue(concrete.Elem(), rand, sizeLeft) + if !ok { + return reflect.Value{}, false + } + v.Index(i).Set(elem) + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + elem, ok := sizedValue(concrete.Elem(), rand, size) + if !ok { + return reflect.Value{}, false + } + v.Index(i).Set(elem) + } + case reflect.String: + numChars := rand.Intn(complexSize) + codePoints := make([]rune, numChars) + for i := 0; i < numChars; i++ { + codePoints[i] = rune(rand.Intn(0x10ffff)) + } + v.SetString(string(codePoints)) + case reflect.Struct: + n := v.NumField() + // Divide sizeLeft evenly among the struct fields. + sizeLeft := size + if n > sizeLeft { + sizeLeft = 1 + } else if n > 0 { + sizeLeft /= n + } + for i := 0; i < n; i++ { + elem, ok := sizedValue(concrete.Field(i).Type, rand, sizeLeft) + if !ok { + return reflect.Value{}, false + } + v.Field(i).Set(elem) + } + default: + return reflect.Value{}, false + } + + return v, true +} + +// A Config structure contains options for running a test. +type Config struct { + // MaxCount sets the maximum number of iterations. + // If zero, MaxCountScale is used. + MaxCount int + // MaxCountScale is a non-negative scale factor applied to the + // default maximum. + // A count of zero implies the default, which is usually 100 + // but can be set by the -quickchecks flag. + MaxCountScale float64 + // Rand specifies a source of random numbers. + // If nil, a default pseudo-random source will be used. 
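+	// (When Rand is nil, getRand seeds a fresh source from the current time.)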
+	Rand *rand.Rand
+	// Values specifies a function to generate a slice of
+	// arbitrary reflect.Values that are congruent with the
+	// arguments to the function being tested.
+	// If nil, the top-level Value function is used to generate them.
+	Values func([]reflect.Value, *rand.Rand)
+}
+
+var defaultConfig Config
+
+// getRand returns the *rand.Rand to use for a given Config.
+func (c *Config) getRand() *rand.Rand {
+	if c.Rand == nil {
+		return rand.New(rand.NewSource(time.Now().UnixNano()))
+	}
+	return c.Rand
+}
+
+// getMaxCount returns the maximum number of iterations to run for a given
+// Config.
+func (c *Config) getMaxCount() (maxCount int) {
+	maxCount = c.MaxCount
+	if maxCount == 0 {
+		if c.MaxCountScale != 0 {
+			maxCount = int(c.MaxCountScale * float64(*defaultMaxCount))
+		} else {
+			maxCount = *defaultMaxCount
+		}
+	}
+
+	return
+}
+
+// A SetupError is the result of an error in the way that check is being
+// used, independent of the functions being tested.
+type SetupError string
+
+func (s SetupError) Error() string { return string(s) }
+
+// A CheckError is the result of Check finding an error.
+type CheckError struct {
+	Count int
+	In    []any
+}
+
+func (s *CheckError) Error() string {
+	return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In))
+}
+
+// A CheckEqualError is the result of [CheckEqual] finding an error.
+type CheckEqualError struct {
+	CheckError
+	Out1 []any
+	Out2 []any
+}
+
+func (s *CheckEqualError) Error() string {
+	return fmt.Sprintf("#%d: failed on input %s. Output 1: %s. Output 2: %s", s.Count, toString(s.In), toString(s.Out1), toString(s.Out2))
+}
+
+// Check looks for an input to f, any function that returns bool,
+// such that f returns false. It calls f repeatedly, with arbitrary
+// values for each argument. If f returns false on a given input,
+// Check returns that input as a *[CheckError].
+// For example:
+//
+//	func TestOddMultipleOfThree(t *testing.T) {
+//		f := func(x int) bool {
+//			y := OddMultipleOfThree(x)
+//			return y%2 == 1 && y%3 == 0
+//		}
+//		if err := quick.Check(f, nil); err != nil {
+//			t.Error(err)
+//		}
+//	}
+func Check(f any, config *Config) error {
+	if config == nil {
+		config = &defaultConfig
+	}
+
+	fVal, fType, ok := functionAndType(f)
+	if !ok {
+		return SetupError("argument is not a function")
+	}
+
+	if fType.NumOut() != 1 {
+		return SetupError("function does not return one value")
+	}
+	if fType.Out(0).Kind() != reflect.Bool {
+		return SetupError("function does not return a bool")
+	}
+
+	arguments := make([]reflect.Value, fType.NumIn())
+	rand := config.getRand()
+	maxCount := config.getMaxCount()
+
+	for i := 0; i < maxCount; i++ {
+		err := arbitraryValues(arguments, fType, config, rand)
+		if err != nil {
+			return err
+		}
+
+		if !fVal.Call(arguments)[0].Bool() {
+			return &CheckError{i + 1, toInterfaces(arguments)}
+		}
+	}
+
+	return nil
+}
+
+// CheckEqual looks for an input on which f and g return different results.
+// It calls f and g repeatedly with arbitrary values for each argument.
+// If f and g return different answers, CheckEqual returns a *[CheckEqualError]
+// describing the input and the outputs.
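+// For example (a sketch, where Sort and SortNaive stand in for two
+// implementations under test):
+//
+//	if err := quick.CheckEqual(Sort, SortNaive, nil); err != nil {
+//		t.Error(err)
+//	}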
+func CheckEqual(f, g any, config *Config) error { + if config == nil { + config = &defaultConfig + } + + x, xType, ok := functionAndType(f) + if !ok { + return SetupError("f is not a function") + } + y, yType, ok := functionAndType(g) + if !ok { + return SetupError("g is not a function") + } + + if xType != yType { + return SetupError("functions have different types") + } + + arguments := make([]reflect.Value, xType.NumIn()) + rand := config.getRand() + maxCount := config.getMaxCount() + + for i := 0; i < maxCount; i++ { + err := arbitraryValues(arguments, xType, config, rand) + if err != nil { + return err + } + + xOut := toInterfaces(x.Call(arguments)) + yOut := toInterfaces(y.Call(arguments)) + + if !reflect.DeepEqual(xOut, yOut) { + return &CheckEqualError{CheckError{i + 1, toInterfaces(arguments)}, xOut, yOut} + } + } + + return nil +} + +// arbitraryValues writes Values to args such that args contains Values +// suitable for calling f. +func arbitraryValues(args []reflect.Value, f reflect.Type, config *Config, rand *rand.Rand) (err error) { + if config.Values != nil { + config.Values(args, rand) + return + } + + for j := 0; j < len(args); j++ { + var ok bool + args[j], ok = Value(f.In(j), rand) + if !ok { + err = SetupError(fmt.Sprintf("cannot create arbitrary value of type %s for argument %d", f.In(j), j)) + return + } + } + + return +} + +func functionAndType(f any) (v reflect.Value, t reflect.Type, ok bool) { + v = reflect.ValueOf(f) + ok = v.Kind() == reflect.Func + if !ok { + return + } + t = v.Type() + return +} + +func toInterfaces(values []reflect.Value) []any { + ret := make([]any, len(values)) + for i, v := range values { + ret[i] = v.Interface() + } + return ret +} + +func toString(interfaces []any) string { + s := make([]string, len(interfaces)) + for i, v := range interfaces { + s[i] = fmt.Sprintf("%#v", v) + } + return strings.Join(s, ", ") +} diff --git a/testing/testing/quick/quick_test.go b/testing/testing/quick/quick_test.go new file mode 100644 index 0000000..9df6dd4 --- /dev/null +++ b/testing/testing/quick/quick_test.go @@ -0,0 +1,327 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package quick + +import ( + "math/rand" + "reflect" + "testing" +) + +func fArray(a [4]byte) [4]byte { return a } + +type TestArrayAlias [4]byte + +func fArrayAlias(a TestArrayAlias) TestArrayAlias { return a } + +func fBool(a bool) bool { return a } + +type TestBoolAlias bool + +func fBoolAlias(a TestBoolAlias) TestBoolAlias { return a } + +func fFloat32(a float32) float32 { return a } + +type TestFloat32Alias float32 + +func fFloat32Alias(a TestFloat32Alias) TestFloat32Alias { return a } + +func fFloat64(a float64) float64 { return a } + +type TestFloat64Alias float64 + +func fFloat64Alias(a TestFloat64Alias) TestFloat64Alias { return a } + +func fComplex64(a complex64) complex64 { return a } + +type TestComplex64Alias complex64 + +func fComplex64Alias(a TestComplex64Alias) TestComplex64Alias { return a } + +func fComplex128(a complex128) complex128 { return a } + +type TestComplex128Alias complex128 + +func fComplex128Alias(a TestComplex128Alias) TestComplex128Alias { return a } + +func fInt16(a int16) int16 { return a } + +type TestInt16Alias int16 + +func fInt16Alias(a TestInt16Alias) TestInt16Alias { return a } + +func fInt32(a int32) int32 { return a } + +type TestInt32Alias int32 + +func fInt32Alias(a TestInt32Alias) TestInt32Alias { return a } + +func fInt64(a int64) int64 { return a } + +type TestInt64Alias int64 + +func fInt64Alias(a TestInt64Alias) TestInt64Alias { return a } + +func fInt8(a int8) int8 { return a } + +type TestInt8Alias int8 + +func fInt8Alias(a TestInt8Alias) TestInt8Alias { return a } + +func fInt(a int) int { return a } + +type TestIntAlias int + +func fIntAlias(a TestIntAlias) TestIntAlias { return a } + +func fMap(a map[int]int) map[int]int { return a } + +type TestMapAlias map[int]int + +func fMapAlias(a TestMapAlias) TestMapAlias { return a } + +func fPtr(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +type TestPtrAlias *int + +func fPtrAlias(a TestPtrAlias) TestPtrAlias { return a } + +func fSlice(a []byte) []byte { return a } + +type TestSliceAlias []byte + +func fSliceAlias(a TestSliceAlias) TestSliceAlias { return a } + +func fString(a string) string { return a } + +type TestStringAlias string + +func fStringAlias(a TestStringAlias) TestStringAlias { return a } + +type TestStruct struct { + A int + B string +} + +func fStruct(a TestStruct) TestStruct { return a } + +type TestStructAlias TestStruct + +func fStructAlias(a TestStructAlias) TestStructAlias { return a } + +func fUint16(a uint16) uint16 { return a } + +type TestUint16Alias uint16 + +func fUint16Alias(a TestUint16Alias) TestUint16Alias { return a } + +func fUint32(a uint32) uint32 { return a } + +type TestUint32Alias uint32 + +func fUint32Alias(a TestUint32Alias) TestUint32Alias { return a } + +func fUint64(a uint64) uint64 { return a } + +type TestUint64Alias uint64 + +func fUint64Alias(a TestUint64Alias) TestUint64Alias { return a } + +func fUint8(a uint8) uint8 { return a } + +type TestUint8Alias uint8 + +func fUint8Alias(a TestUint8Alias) TestUint8Alias { return a } + +func fUint(a uint) uint { return a } + +type TestUintAlias uint + +func fUintAlias(a TestUintAlias) TestUintAlias { return a } + +func fUintptr(a uintptr) uintptr { return a } + +type TestUintptrAlias uintptr + +func fUintptrAlias(a TestUintptrAlias) TestUintptrAlias { return a } + +func reportError(property string, err error, t *testing.T) { + if err != nil { + t.Errorf("%s: %s", property, err) + } +} + +func TestCheckEqual(t *testing.T) { + reportError("fArray", CheckEqual(fArray, fArray, 
nil), t) + reportError("fArrayAlias", CheckEqual(fArrayAlias, fArrayAlias, nil), t) + reportError("fBool", CheckEqual(fBool, fBool, nil), t) + reportError("fBoolAlias", CheckEqual(fBoolAlias, fBoolAlias, nil), t) + reportError("fFloat32", CheckEqual(fFloat32, fFloat32, nil), t) + reportError("fFloat32Alias", CheckEqual(fFloat32Alias, fFloat32Alias, nil), t) + reportError("fFloat64", CheckEqual(fFloat64, fFloat64, nil), t) + reportError("fFloat64Alias", CheckEqual(fFloat64Alias, fFloat64Alias, nil), t) + reportError("fComplex64", CheckEqual(fComplex64, fComplex64, nil), t) + reportError("fComplex64Alias", CheckEqual(fComplex64Alias, fComplex64Alias, nil), t) + reportError("fComplex128", CheckEqual(fComplex128, fComplex128, nil), t) + reportError("fComplex128Alias", CheckEqual(fComplex128Alias, fComplex128Alias, nil), t) + reportError("fInt16", CheckEqual(fInt16, fInt16, nil), t) + reportError("fInt16Alias", CheckEqual(fInt16Alias, fInt16Alias, nil), t) + reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t) + reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t) + reportError("fInt64", CheckEqual(fInt64, fInt64, nil), t) + reportError("fInt64Alias", CheckEqual(fInt64Alias, fInt64Alias, nil), t) + reportError("fInt8", CheckEqual(fInt8, fInt8, nil), t) + reportError("fInt8Alias", CheckEqual(fInt8Alias, fInt8Alias, nil), t) + reportError("fInt", CheckEqual(fInt, fInt, nil), t) + reportError("fIntAlias", CheckEqual(fIntAlias, fIntAlias, nil), t) + reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t) + reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t) + reportError("fMap", CheckEqual(fMap, fMap, nil), t) + reportError("fMapAlias", CheckEqual(fMapAlias, fMapAlias, nil), t) + reportError("fPtr", CheckEqual(fPtr, fPtr, nil), t) + reportError("fPtrAlias", CheckEqual(fPtrAlias, fPtrAlias, nil), t) + reportError("fSlice", CheckEqual(fSlice, fSlice, nil), t) + reportError("fSliceAlias", CheckEqual(fSliceAlias, fSliceAlias, nil), t) + reportError("fString", CheckEqual(fString, fString, nil), t) + reportError("fStringAlias", CheckEqual(fStringAlias, fStringAlias, nil), t) + reportError("fStruct", CheckEqual(fStruct, fStruct, nil), t) + reportError("fStructAlias", CheckEqual(fStructAlias, fStructAlias, nil), t) + reportError("fUint16", CheckEqual(fUint16, fUint16, nil), t) + reportError("fUint16Alias", CheckEqual(fUint16Alias, fUint16Alias, nil), t) + reportError("fUint32", CheckEqual(fUint32, fUint32, nil), t) + reportError("fUint32Alias", CheckEqual(fUint32Alias, fUint32Alias, nil), t) + reportError("fUint64", CheckEqual(fUint64, fUint64, nil), t) + reportError("fUint64Alias", CheckEqual(fUint64Alias, fUint64Alias, nil), t) + reportError("fUint8", CheckEqual(fUint8, fUint8, nil), t) + reportError("fUint8Alias", CheckEqual(fUint8Alias, fUint8Alias, nil), t) + reportError("fUint", CheckEqual(fUint, fUint, nil), t) + reportError("fUintAlias", CheckEqual(fUintAlias, fUintAlias, nil), t) + reportError("fUintptr", CheckEqual(fUintptr, fUintptr, nil), t) + reportError("fUintptrAlias", CheckEqual(fUintptrAlias, fUintptrAlias, nil), t) +} + +// This tests that ArbitraryValue is working by checking that all the arbitrary +// values of type MyStruct have x = 42. 
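+// Because myStruct implements [Generator], Value calls its Generate method
+// rather than constructing an arbitrary value by reflection.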
+type myStruct struct { + x int +} + +func (m myStruct) Generate(r *rand.Rand, _ int) reflect.Value { + return reflect.ValueOf(myStruct{x: 42}) +} + +func myStructProperty(in myStruct) bool { return in.x == 42 } + +func TestCheckProperty(t *testing.T) { + reportError("myStructProperty", Check(myStructProperty, nil), t) +} + +func TestFailure(t *testing.T) { + f := func(x int) bool { return false } + err := Check(f, nil) + if err == nil { + t.Errorf("Check didn't return an error") + } + if _, ok := err.(*CheckError); !ok { + t.Errorf("Error was not a CheckError: %s", err) + } + + err = CheckEqual(fUint, fUint32, nil) + if err == nil { + t.Errorf("#1 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#1 Error was not a SetupError: %s", err) + } + + err = CheckEqual(func(x, y int) {}, func(x int) {}, nil) + if err == nil { + t.Errorf("#2 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#2 Error was not a SetupError: %s", err) + } + + err = CheckEqual(func(x int) int { return 0 }, func(x int) int32 { return 0 }, nil) + if err == nil { + t.Errorf("#3 CheckEqual didn't return an error") + } + if _, ok := err.(SetupError); !ok { + t.Errorf("#3 Error was not a SetupError: %s", err) + } +} + +// Recursive data structures didn't terminate. +// Issues 8818 and 11148. +func TestRecursive(t *testing.T) { + type R struct { + Ptr *R + SliceP []*R + Slice []R + Map map[int]R + MapP map[int]*R + MapR map[*R]*R + SliceMap []map[int]R + } + + f := func(r R) bool { return true } + Check(f, nil) +} + +func TestEmptyStruct(t *testing.T) { + f := func(struct{}) bool { return true } + Check(f, nil) +} + +type ( + A struct{ B *B } + B struct{ A *A } +) + +func TestMutuallyRecursive(t *testing.T) { + f := func(a A) bool { return true } + Check(f, nil) +} + +// Some serialization formats (e.g. encoding/pem) cannot distinguish +// between a nil and an empty map or slice, so avoid generating the +// zero value for these. +func TestNonZeroSliceAndMap(t *testing.T) { + type Q struct { + M map[int]int + S []int + } + f := func(q Q) bool { + return q.M != nil && q.S != nil + } + err := Check(f, nil) + if err != nil { + t.Fatal(err) + } +} + +func TestInt64(t *testing.T) { + var lo, hi int64 + f := func(x int64) bool { + if x < lo { + lo = x + } + if x > hi { + hi = x + } + return true + } + cfg := &Config{MaxCount: 10000} + Check(f, cfg) + if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 { + t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi) + } +} diff --git a/testing/testing/run_example.go b/testing/testing/run_example.go new file mode 100644 index 0000000..b2c5c3d --- /dev/null +++ b/testing/testing/run_example.go @@ -0,0 +1,66 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js && !wasip1 + +// TODO(@musiol, @odeke-em): re-unify this entire file back into +// example.go when js/wasm gets an os.Pipe implementation +// and no longer needs this separation. + +package testing + +import ( + "fmt" + "io" + "os" + "strings" + "time" +) + +func runExample(eg InternalExample) (ok bool) { + if chatty.on { + fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name) + } + + // Capture stdout. 
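+	// The pipe's read end is drained on a separate goroutine so that an
+	// example writing more output than the pipe buffer holds cannot block.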
+ stdout := os.Stdout + r, w, err := os.Pipe() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Stdout = w + outC := make(chan string) + go func() { + var buf strings.Builder + _, err := io.Copy(&buf, r) + r.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err) + os.Exit(1) + } + outC <- buf.String() + }() + + finished := false + start := time.Now() + + // Clean up in a deferred call so we can recover if the example panics. + defer func() { + timeSpent := time.Since(start) + + // Close pipe, restore stdout, get output. + w.Close() + os.Stdout = stdout + out := <-outC + + err := recover() + ok = eg.processRunResult(out, timeSpent, finished, err) + }() + + // Run example. + eg.F() + finished = true + return +} diff --git a/testing/testing/run_example_wasm.go b/testing/testing/run_example_wasm.go new file mode 100644 index 0000000..b815fcd --- /dev/null +++ b/testing/testing/run_example_wasm.go @@ -0,0 +1,76 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build js || wasip1 + +package testing + +import ( + "fmt" + "io" + "os" + "strings" + "time" +) + +// TODO(@musiol, @odeke-em): unify this code back into +// example.go when js/wasm gets an os.Pipe implementation. +func runExample(eg InternalExample) (ok bool) { + if chatty.on { + fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name) + } + + // Capture stdout to temporary file. We're not using + // os.Pipe because it is not supported on js/wasm. + stdout := os.Stdout + f := createTempFile(eg.Name) + os.Stdout = f + finished := false + start := time.Now() + + // Clean up in a deferred call so we can recover if the example panics. + defer func() { + timeSpent := time.Since(start) + + // Restore stdout, get output and remove temporary file. + os.Stdout = stdout + var buf strings.Builder + _, seekErr := f.Seek(0, io.SeekStart) + _, readErr := io.Copy(&buf, f) + out := buf.String() + f.Close() + os.Remove(f.Name()) + if seekErr != nil { + fmt.Fprintf(os.Stderr, "testing: seek temp file: %v\n", seekErr) + os.Exit(1) + } + if readErr != nil { + fmt.Fprintf(os.Stderr, "testing: read temp file: %v\n", readErr) + os.Exit(1) + } + + err := recover() + ok = eg.processRunResult(out, timeSpent, finished, err) + }() + + // Run example. + eg.F() + finished = true + return +} + +func createTempFile(exampleName string) *os.File { + for i := 0; ; i++ { + name := fmt.Sprintf("%s/go-example-stdout-%s-%d.txt", os.TempDir(), exampleName, i) + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + if os.IsExist(err) { + continue + } + fmt.Fprintf(os.Stderr, "testing: open temp file: %v\n", err) + os.Exit(1) + } + return f + } +} diff --git a/testing/testing/slogtest/example_test.go b/testing/testing/slogtest/example_test.go new file mode 100644 index 0000000..0517a4b --- /dev/null +++ b/testing/testing/slogtest/example_test.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slogtest_test + +import ( + "bytes" + "encoding/json" + "log" + "log/slog" + "testing/slogtest" +) + +// This example demonstrates one technique for testing a handler with this +// package. The handler is given a [bytes.Buffer] to write to, and each line +// of the resulting output is parsed. 
+// For JSON output, [encoding/json.Unmarshal] produces a result in the desired +// format when given a pointer to a map[string]any. +func Example_parsing() { + var buf bytes.Buffer + h := slog.NewJSONHandler(&buf, nil) + + results := func() []map[string]any { + var ms []map[string]any + for _, line := range bytes.Split(buf.Bytes(), []byte{'\n'}) { + if len(line) == 0 { + continue + } + var m map[string]any + if err := json.Unmarshal(line, &m); err != nil { + panic(err) // In a real test, use t.Fatal. + } + ms = append(ms, m) + } + return ms + } + err := slogtest.TestHandler(h, results) + if err != nil { + log.Fatal(err) + } + + // Output: +} diff --git a/testing/testing/slogtest/run_test.go b/testing/testing/slogtest/run_test.go new file mode 100644 index 0000000..c82da10 --- /dev/null +++ b/testing/testing/slogtest/run_test.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slogtest_test + +import ( + "bytes" + "encoding/json" + "log/slog" + "testing" + "testing/slogtest" +) + +func TestRun(t *testing.T) { + var buf bytes.Buffer + + newHandler := func(*testing.T) slog.Handler { + buf.Reset() + return slog.NewJSONHandler(&buf, nil) + } + result := func(t *testing.T) map[string]any { + m := map[string]any{} + if err := json.Unmarshal(buf.Bytes(), &m); err != nil { + t.Fatal(err) + } + return m + } + + slogtest.Run(t, newHandler, result) +} diff --git a/testing/testing/slogtest/slogtest.go b/testing/testing/slogtest/slogtest.go new file mode 100644 index 0000000..f9e2d90 --- /dev/null +++ b/testing/testing/slogtest/slogtest.go @@ -0,0 +1,375 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slogtest implements support for testing implementations of log/slog.Handler. +package slogtest + +import ( + "context" + "errors" + "fmt" + "log/slog" + "reflect" + "runtime" + "testing" + "time" +) + +type testCase struct { + // Subtest name. + name string + // If non-empty, explanation explains the violated constraint. + explanation string + // f executes a single log event using its argument logger. + // So that mkdescs.sh can generate the right description, + // the body of f must appear on a single line whose first + // non-whitespace characters are "l.". + f func(*slog.Logger) + // If mod is not nil, it is called to modify the Record + // generated by the Logger before it is passed to the Handler. + mod func(*slog.Record) + // checks is a list of checks to run on the result. 
+ checks []check +} + +var cases = []testCase{ + { + name: "built-ins", + explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"), + f: func(l *slog.Logger) { + l.Info("message") + }, + checks: []check{ + hasKey(slog.TimeKey), + hasKey(slog.LevelKey), + hasAttr(slog.MessageKey, "message"), + }, + }, + { + name: "attrs", + explanation: withSource("a Handler should output attributes passed to the logging function"), + f: func(l *slog.Logger) { + l.Info("message", "k", "v") + }, + checks: []check{ + hasAttr("k", "v"), + }, + }, + { + name: "empty-attr", + explanation: withSource("a Handler should ignore an empty Attr"), + f: func(l *slog.Logger) { + l.Info("msg", "a", "b", "", nil, "c", "d") + }, + checks: []check{ + hasAttr("a", "b"), + missingKey(""), + hasAttr("c", "d"), + }, + }, + { + name: "zero-time", + explanation: withSource("a Handler should ignore a zero Record.Time"), + f: func(l *slog.Logger) { + l.Info("msg", "k", "v") + }, + mod: func(r *slog.Record) { r.Time = time.Time{} }, + checks: []check{ + missingKey(slog.TimeKey), + }, + }, + { + name: "WithAttrs", + explanation: withSource("a Handler should include the attributes from the WithAttrs method"), + f: func(l *slog.Logger) { + l.With("a", "b").Info("msg", "k", "v") + }, + checks: []check{ + hasAttr("a", "b"), + hasAttr("k", "v"), + }, + }, + { + name: "groups", + explanation: withSource("a Handler should handle Group attributes"), + f: func(l *slog.Logger) { + l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f") + }, + checks: []check{ + hasAttr("a", "b"), + inGroup("G", hasAttr("c", "d")), + hasAttr("e", "f"), + }, + }, + { + name: "empty-group", + explanation: withSource("a Handler should ignore an empty group"), + f: func(l *slog.Logger) { + l.Info("msg", "a", "b", slog.Group("G"), "e", "f") + }, + checks: []check{ + hasAttr("a", "b"), + missingKey("G"), + hasAttr("e", "f"), + }, + }, + { + name: "inline-group", + explanation: withSource("a Handler should inline the Attrs of a group with an empty key"), + f: func(l *slog.Logger) { + l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f") + + }, + checks: []check{ + hasAttr("a", "b"), + hasAttr("c", "d"), + hasAttr("e", "f"), + }, + }, + { + name: "WithGroup", + explanation: withSource("a Handler should handle the WithGroup method"), + f: func(l *slog.Logger) { + l.WithGroup("G").Info("msg", "a", "b") + }, + checks: []check{ + hasKey(slog.TimeKey), + hasKey(slog.LevelKey), + hasAttr(slog.MessageKey, "msg"), + missingKey("a"), + inGroup("G", hasAttr("a", "b")), + }, + }, + { + name: "multi-With", + explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"), + f: func(l *slog.Logger) { + l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f") + }, + checks: []check{ + hasKey(slog.TimeKey), + hasKey(slog.LevelKey), + hasAttr(slog.MessageKey, "msg"), + hasAttr("a", "b"), + inGroup("G", hasAttr("c", "d")), + inGroup("G", inGroup("H", hasAttr("e", "f"))), + }, + }, + { + name: "empty-group-record", + explanation: withSource("a Handler should not output groups if there are no attributes"), + f: func(l *slog.Logger) { + l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg") + }, + checks: []check{ + hasKey(slog.TimeKey), + hasKey(slog.LevelKey), + hasAttr(slog.MessageKey, "msg"), + hasAttr("a", "b"), + inGroup("G", hasAttr("c", "d")), + inGroup("G", missingKey("H")), + }, + }, + { + name: "resolve", + explanation: withSource("a 
Handler should call Resolve on attribute values"), + f: func(l *slog.Logger) { + l.Info("msg", "k", &replace{"replaced"}) + }, + checks: []check{hasAttr("k", "replaced")}, + }, + { + name: "resolve-groups", + explanation: withSource("a Handler should call Resolve on attribute values in groups"), + f: func(l *slog.Logger) { + l.Info("msg", + slog.Group("G", + slog.String("a", "v1"), + slog.Any("b", &replace{"v2"}))) + }, + checks: []check{ + inGroup("G", hasAttr("a", "v1")), + inGroup("G", hasAttr("b", "v2")), + }, + }, + { + name: "resolve-WithAttrs", + explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"), + f: func(l *slog.Logger) { + l = l.With("k", &replace{"replaced"}) + l.Info("msg") + }, + checks: []check{hasAttr("k", "replaced")}, + }, + { + name: "resolve-WithAttrs-groups", + explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"), + f: func(l *slog.Logger) { + l = l.With(slog.Group("G", + slog.String("a", "v1"), + slog.Any("b", &replace{"v2"}))) + l.Info("msg") + }, + checks: []check{ + inGroup("G", hasAttr("a", "v1")), + inGroup("G", hasAttr("b", "v2")), + }, + }, + { + name: "empty-PC", + explanation: withSource("a Handler should not output SourceKey if the PC is zero"), + f: func(l *slog.Logger) { + l.Info("message") + }, + mod: func(r *slog.Record) { r.PC = 0 }, + checks: []check{ + missingKey(slog.SourceKey), + }, + }, +} + +// TestHandler tests a [slog.Handler]. +// If TestHandler finds any misbehaviors, it returns an error for each, +// combined into a single error with [errors.Join]. +// +// TestHandler installs the given Handler in a [slog.Logger] and +// makes several calls to the Logger's output methods. +// The Handler should be enabled for levels Info and above. +// +// The results function is invoked after all such calls. +// It should return a slice of map[string]any, one for each call to a Logger output method. +// The keys and values of the map should correspond to the keys and values of the Handler's +// output. Each group in the output should be represented as its own nested map[string]any. +// The standard keys [slog.TimeKey], [slog.LevelKey] and [slog.MessageKey] should be used. +// +// If the Handler outputs JSON, then calling [encoding/json.Unmarshal] with a `map[string]any` +// will create the right data structure. +// +// If a Handler intentionally drops an attribute that is checked by a test, +// then the results function should check for its absence and add it to the map it returns. +func TestHandler(h slog.Handler, results func() []map[string]any) error { + // Run the handler on the test cases. + for _, c := range cases { + ht := h + if c.mod != nil { + ht = &wrapper{h, c.mod} + } + l := slog.New(ht) + c.f(l) + } + + // Collect and check the results. + var errs []error + res := results() + if g, w := len(res), len(cases); g != w { + return fmt.Errorf("got %d results, want %d", g, w) + } + for i, got := range res { + c := cases[i] + for _, check := range c.checks { + if problem := check(got); problem != "" { + errs = append(errs, fmt.Errorf("%s: %s", problem, c.explanation)) + } + } + } + return errors.Join(errs...) +} + +// Run exercises a [slog.Handler] on the same test cases as [TestHandler], but +// runs each case in a subtest. For each test case, it first calls newHandler to +// get an instance of the handler under test, then runs the test case, then +// calls result to get the result. If the test case fails, it calls t.Error. 
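+//
+// A minimal use, mirroring this package's own run_test.go (buf is a
+// bytes.Buffer shared by the two callbacks):
+//
+//	newHandler := func(*testing.T) slog.Handler {
+//		buf.Reset()
+//		return slog.NewJSONHandler(&buf, nil)
+//	}
+//	result := func(t *testing.T) map[string]any {
+//		m := map[string]any{}
+//		if err := json.Unmarshal(buf.Bytes(), &m); err != nil {
+//			t.Fatal(err)
+//		}
+//		return m
+//	}
+//	slogtest.Run(t, newHandler, result)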
+func Run(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any) {
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			h := newHandler(t)
+			if c.mod != nil {
+				h = &wrapper{h, c.mod}
+			}
+			l := slog.New(h)
+			c.f(l)
+			got := result(t)
+			for _, check := range c.checks {
+				if p := check(got); p != "" {
+					t.Errorf("%s: %s", p, c.explanation)
+				}
+			}
+		})
+	}
+}
+
+type check func(map[string]any) string
+
+func hasKey(key string) check {
+	return func(m map[string]any) string {
+		if _, ok := m[key]; !ok {
+			return fmt.Sprintf("missing key %q", key)
+		}
+		return ""
+	}
+}
+
+func missingKey(key string) check {
+	return func(m map[string]any) string {
+		if _, ok := m[key]; ok {
+			return fmt.Sprintf("unexpected key %q", key)
+		}
+		return ""
+	}
+}
+
+func hasAttr(key string, wantVal any) check {
+	return func(m map[string]any) string {
+		if s := hasKey(key)(m); s != "" {
+			return s
+		}
+		gotVal := m[key]
+		if !reflect.DeepEqual(gotVal, wantVal) {
+			return fmt.Sprintf("%q: got %#v, want %#v", key, gotVal, wantVal)
+		}
+		return ""
+	}
+}
+
+func inGroup(name string, c check) check {
+	return func(m map[string]any) string {
+		v, ok := m[name]
+		if !ok {
+			return fmt.Sprintf("missing group %q", name)
+		}
+		g, ok := v.(map[string]any)
+		if !ok {
+			return fmt.Sprintf("value for group %q is not map[string]any", name)
+		}
+		return c(g)
+	}
+}
+
+type wrapper struct {
+	slog.Handler
+	mod func(*slog.Record)
+}
+
+func (h *wrapper) Handle(ctx context.Context, r slog.Record) error {
+	h.mod(&r)
+	return h.Handler.Handle(ctx, r)
+}
+
+func withSource(s string) string {
+	_, file, line, ok := runtime.Caller(1)
+	if !ok {
+		panic("runtime.Caller failed")
+	}
+	return fmt.Sprintf("%s (%s:%d)", s, file, line)
+}
+
+type replace struct {
+	v any
+}
+
+func (r *replace) LogValue() slog.Value { return slog.AnyValue(r.v) }
+
+func (r *replace) String() string {
+	return fmt.Sprintf("<replace(%v)>", r.v)
+}
diff --git a/testing/testing/sub_test.go b/testing/testing/sub_test.go
new file mode 100644
index 0000000..82ec580
--- /dev/null
+++ b/testing/testing/sub_test.go
@@ -0,0 +1,991 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"runtime"
+	"slices"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+func init() {
+	// Make benchmark tests run 10x faster.
+ benchTime.d = 100 * time.Millisecond +} + +func TestTestState(t *T) { + const ( + add1 = 0 + done = 1 + ) + type call struct { + typ int // run or done + // result from applying the call + running int + waiting int + started bool + } + testCases := []struct { + max int + run []call + }{{ + max: 1, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: done, running: 0, waiting: 0, started: false}, + }, + }, { + max: 1, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: add1, running: 1, waiting: 1, started: false}, + {typ: done, running: 1, waiting: 0, started: true}, + {typ: done, running: 0, waiting: 0, started: false}, + {typ: add1, running: 1, waiting: 0, started: true}, + }, + }, { + max: 3, + run: []call{ + {typ: add1, running: 1, waiting: 0, started: true}, + {typ: add1, running: 2, waiting: 0, started: true}, + {typ: add1, running: 3, waiting: 0, started: true}, + {typ: add1, running: 3, waiting: 1, started: false}, + {typ: add1, running: 3, waiting: 2, started: false}, + {typ: add1, running: 3, waiting: 3, started: false}, + {typ: done, running: 3, waiting: 2, started: true}, + {typ: add1, running: 3, waiting: 3, started: false}, + {typ: done, running: 3, waiting: 2, started: true}, + {typ: done, running: 3, waiting: 1, started: true}, + {typ: done, running: 3, waiting: 0, started: true}, + {typ: done, running: 2, waiting: 0, started: false}, + {typ: done, running: 1, waiting: 0, started: false}, + {typ: done, running: 0, waiting: 0, started: false}, + }, + }} + for i, tc := range testCases { + tstate := &testState{ + startParallel: make(chan bool), + maxParallel: tc.max, + } + for j, call := range tc.run { + doCall := func(f func()) chan bool { + done := make(chan bool) + go func() { + f() + done <- true + }() + return done + } + started := false + switch call.typ { + case add1: + signal := doCall(tstate.waitParallel) + select { + case <-signal: + started = true + case tstate.startParallel <- true: + <-signal + } + case done: + signal := doCall(tstate.release) + select { + case <-signal: + case <-tstate.startParallel: + started = true + <-signal + } + } + if started != call.started { + t.Errorf("%d:%d:started: got %v; want %v", i, j, started, call.started) + } + if tstate.running != call.running { + t.Errorf("%d:%d:running: got %v; want %v", i, j, tstate.running, call.running) + } + if tstate.numWaiting != call.waiting { + t.Errorf("%d:%d:waiting: got %v; want %v", i, j, tstate.numWaiting, call.waiting) + } + } + } +} + +func TestTRun(t *T) { + realTest := t + testCases := []struct { + desc string + ok bool + maxPar int + chatty bool + json bool + output string + f func(*T) + }{{ + desc: "failnow skips future sequential and parallel tests at same level", + ok: false, + maxPar: 1, + output: ` +--- FAIL: failnow skips future sequential and parallel tests at same level (N.NNs) + --- FAIL: failnow skips future sequential and parallel tests at same level/#00 (N.NNs) + `, + f: func(t *T) { + ranSeq := false + ranPar := false + t.Run("", func(t *T) { + t.Run("par", func(t *T) { + t.Parallel() + ranPar = true + }) + t.Run("seq", func(t *T) { + ranSeq = true + }) + t.FailNow() + t.Run("seq", func(t *T) { + realTest.Error("test must be skipped") + }) + t.Run("par", func(t *T) { + t.Parallel() + realTest.Error("test must be skipped.") + }) + }) + if !ranPar { + realTest.Error("parallel test was not run") + } + if !ranSeq { + realTest.Error("sequential test was not run") + } + }, + }, { + desc: "failure in parallel test propagates upwards", + 
ok:     false,
+		maxPar: 1,
+		output: `
+--- FAIL: failure in parallel test propagates upwards (N.NNs)
+    --- FAIL: failure in parallel test propagates upwards/#00 (N.NNs)
+        --- FAIL: failure in parallel test propagates upwards/#00/par (N.NNs)
+		`,
+		f: func(t *T) {
+			t.Run("", func(t *T) {
+				t.Parallel()
+				t.Run("par", func(t *T) {
+					t.Parallel()
+					t.Fail()
+				})
+			})
+		},
+	}, {
+		desc:   "skipping without message, chatty",
+		ok:     true,
+		chatty: true,
+		output: `
+=== RUN   skipping without message, chatty
+--- SKIP: skipping without message, chatty (N.NNs)`,
+		f: func(t *T) { t.SkipNow() },
+	}, {
+		desc:   "chatty with recursion",
+		ok:     true,
+		chatty: true,
+		output: `
+=== RUN   chatty with recursion
+=== RUN   chatty with recursion/#00
+=== RUN   chatty with recursion/#00/#00
+--- PASS: chatty with recursion (N.NNs)
+    --- PASS: chatty with recursion/#00 (N.NNs)
+        --- PASS: chatty with recursion/#00/#00 (N.NNs)`,
+		f: func(t *T) {
+			t.Run("", func(t *T) {
+				t.Run("", func(t *T) {})
+			})
+		},
+	}, {
+		desc:   "chatty with recursion and json",
+		ok:     false,
+		chatty: true,
+		json:   true,
+		output: `
+^V=== RUN   chatty with recursion and json
+^V=== RUN   chatty with recursion and json/#00
+^V=== RUN   chatty with recursion and json/#00/#00
+^V--- PASS: chatty with recursion and json/#00/#00 (N.NNs)
+^V=== NAME  chatty with recursion and json/#00
+^V=== RUN   chatty with recursion and json/#00/#01
+    sub_test.go:NNN: skip
+^V--- SKIP: chatty with recursion and json/#00/#01 (N.NNs)
+^V=== NAME  chatty with recursion and json/#00
+^V=== RUN   chatty with recursion and json/#00/#02
+    sub_test.go:NNN: fail
+^V--- FAIL: chatty with recursion and json/#00/#02 (N.NNs)
+^V=== NAME  chatty with recursion and json/#00
+^V--- FAIL: chatty with recursion and json/#00 (N.NNs)
+^V=== NAME  chatty with recursion and json
+^V--- FAIL: chatty with recursion and json (N.NNs)
+^V=== NAME  `,
+		f: func(t *T) {
+			t.Run("", func(t *T) {
+				t.Run("", func(t *T) {})
+				t.Run("", func(t *T) { t.Skip("skip") })
+				t.Run("", func(t *T) { t.Fatal("fail") })
+			})
+		},
+	}, {
+		desc: "skipping without message, not chatty",
+		ok:   true,
+		f:    func(t *T) { t.SkipNow() },
+	}, {
+		desc: "skipping after error",
+		output: `
+--- FAIL: skipping after error (N.NNs)
+    sub_test.go:NNN: an error
+    sub_test.go:NNN: skipped`,
+		f: func(t *T) {
+			t.Error("an error")
+			t.Skip("skipped")
+		},
+	}, {
+		desc:   "use Run to locally synchronize parallelism",
+		ok:     true,
+		maxPar: 1,
+		f: func(t *T) {
+			var count uint32
+			t.Run("waitGroup", func(t *T) {
+				for i := 0; i < 4; i++ {
+					t.Run("par", func(t *T) {
+						t.Parallel()
+						atomic.AddUint32(&count, 1)
+					})
+				}
+			})
+			if count != 4 {
+				t.Errorf("count was %d; want 4", count)
+			}
+		},
+	}, {
+		desc: "alternate sequential and parallel",
+		// Sequential tests should partake in the counting of running threads.
+		// Otherwise, if one runs parallel subtests in sequential tests that are
+		// themselves subtests of parallel tests, the counts can get askew.
+		ok:     true,
+		maxPar: 1,
+		f: func(t *T) {
+			t.Run("a", func(t *T) {
+				t.Parallel()
+				t.Run("b", func(t *T) {
+					// Sequential: ensure running count is decremented.
+					t.Run("c", func(t *T) {
+						t.Parallel()
+					})
+
+				})
+			})
+		},
+	}, {
+		desc: "alternate sequential and parallel 2",
+		// Sequential tests should partake in the counting of running threads.
+		// Otherwise, if one runs parallel subtests in sequential tests that are
+		// themselves subtests of parallel tests, the counts can get askew.
+ ok: true, + maxPar: 2, + f: func(t *T) { + for i := 0; i < 2; i++ { + t.Run("a", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + for i := 0; i < 2; i++ { + t.Run("b", func(t *T) { + time.Sleep(time.Nanosecond) + for i := 0; i < 2; i++ { + t.Run("c", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + }) + } + + }) + } + }) + } + }, + }, { + desc: "stress test", + ok: true, + maxPar: 4, + f: func(t *T) { + t.Parallel() + for i := 0; i < 12; i++ { + t.Run("a", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + for i := 0; i < 12; i++ { + t.Run("b", func(t *T) { + time.Sleep(time.Nanosecond) + for i := 0; i < 12; i++ { + t.Run("c", func(t *T) { + t.Parallel() + time.Sleep(time.Nanosecond) + t.Run("d1", func(t *T) {}) + t.Run("d2", func(t *T) {}) + t.Run("d3", func(t *T) {}) + t.Run("d4", func(t *T) {}) + }) + } + }) + } + }) + } + }, + }, { + desc: "skip output", + ok: true, + maxPar: 4, + f: func(t *T) { + t.Skip() + }, + }, { + desc: "subtest calls error on parent", + ok: false, + output: ` +--- FAIL: subtest calls error on parent (N.NNs) + sub_test.go:NNN: first this + sub_test.go:NNN: and now this! + sub_test.go:NNN: oh, and this too`, + maxPar: 1, + f: func(t *T) { + t.Errorf("first this") + outer := t + t.Run("", func(t *T) { + outer.Errorf("and now this!") + }) + t.Errorf("oh, and this too") + }, + }, { + desc: "subtest calls fatal on parent", + ok: false, + output: ` +--- FAIL: subtest calls fatal on parent (N.NNs) + sub_test.go:NNN: first this + sub_test.go:NNN: and now this! + --- FAIL: subtest calls fatal on parent/#00 (N.NNs) + testing.go:NNN: test executed panic(nil) or runtime.Goexit: subtest may have called FailNow on a parent test`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Errorf("first this") + t.Run("", func(t *T) { + outer.Fatalf("and now this!") + }) + t.Errorf("Should not reach here.") + }, + }, { + desc: "subtest calls error on ancestor", + ok: false, + output: ` +--- FAIL: subtest calls error on ancestor (N.NNs) + sub_test.go:NNN: Report to ancestor + --- FAIL: subtest calls error on ancestor/#00 (N.NNs) + sub_test.go:NNN: Still do this + sub_test.go:NNN: Also do this`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Run("", func(t *T) { + t.Run("", func(t *T) { + outer.Errorf("Report to ancestor") + }) + t.Errorf("Still do this") + }) + t.Errorf("Also do this") + }, + }, { + desc: "subtest calls fatal on ancestor", + ok: false, + output: ` +--- FAIL: subtest calls fatal on ancestor (N.NNs) + sub_test.go:NNN: Nope`, + maxPar: 1, + f: func(t *T) { + outer := t + t.Run("", func(t *T) { + for i := 0; i < 4; i++ { + t.Run("", func(t *T) { + outer.Fatalf("Nope") + }) + t.Errorf("Don't do this") + } + t.Errorf("And neither do this") + }) + t.Errorf("Nor this") + }, + }, { + desc: "panic on goroutine fail after test exit", + ok: false, + maxPar: 4, + f: func(t *T) { + ch := make(chan bool) + t.Run("", func(t *T) { + go func() { + <-ch + defer func() { + if r := recover(); r == nil { + realTest.Errorf("expected panic") + } + ch <- true + }() + t.Errorf("failed after success") + }() + }) + ch <- true + <-ch + }, + }, { + desc: "log in finished sub test logs to parent", + ok: false, + output: ` + --- FAIL: log in finished sub test logs to parent (N.NNs) + sub_test.go:NNN: message2 + sub_test.go:NNN: message1 + sub_test.go:NNN: error`, + maxPar: 1, + f: func(t *T) { + ch := make(chan bool) + t.Run("sub", func(t2 *T) { + go func() { + <-ch + t2.Log("message1") + ch <- true + }() + }) + t.Log("message2") + ch <- true + <-ch + t.Errorf("error") + 
}, + }, { + // A chatty test should always log with fmt.Print, even if the + // parent test has completed. + desc: "log in finished sub test with chatty", + ok: false, + chatty: true, + output: ` + --- FAIL: log in finished sub test with chatty (N.NNs)`, + maxPar: 1, + f: func(t *T) { + ch := make(chan bool) + t.Run("sub", func(t2 *T) { + go func() { + <-ch + t2.Log("message1") + ch <- true + }() + }) + t.Log("message2") + ch <- true + <-ch + t.Errorf("error") + }, + }, { + // If a subtest panics we should run cleanups. + desc: "cleanup when subtest panics", + ok: false, + chatty: false, + output: ` +--- FAIL: cleanup when subtest panics (N.NNs) + --- FAIL: cleanup when subtest panics/sub (N.NNs) + sub_test.go:NNN: running cleanup`, + f: func(t *T) { + t.Cleanup(func() { t.Log("running cleanup") }) + t.Run("sub", func(t2 *T) { + t2.FailNow() + }) + }, + }} + for _, tc := range testCases { + t.Run(tc.desc, func(t *T) { + tstate := newTestState(tc.maxPar, allMatcher()) + buf := &strings.Builder{} + root := &T{ + common: common{ + signal: make(chan bool), + barrier: make(chan bool), + name: "", + w: buf, + }, + tstate: tstate, + } + if tc.chatty { + root.chatty = newChattyPrinter(root.w) + root.chatty.json = tc.json + } + ok := root.Run(tc.desc, tc.f) + tstate.release() + + if ok != tc.ok { + t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok) + } + if ok != !root.Failed() { + t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed()) + } + if tstate.running != 0 || tstate.numWaiting != 0 { + t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, tstate.running, tstate.numWaiting) + } + got := strings.TrimSpace(buf.String()) + want := strings.TrimSpace(tc.output) + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want) + } + }) + } +} + +func TestBRun(t *T) { + work := func(b *B) { + for i := 0; i < b.N; i++ { + time.Sleep(time.Nanosecond) + } + } + testCases := []struct { + desc string + failed bool + chatty bool + output string + f func(*B) + }{{ + desc: "simulate sequential run of subbenchmarks.", + f: func(b *B) { + b.Run("", func(b *B) { work(b) }) + time1 := b.result.NsPerOp() + b.Run("", func(b *B) { work(b) }) + time2 := b.result.NsPerOp() + if time1 >= time2 { + t.Errorf("no time spent in benchmark t1 >= t2 (%d >= %d)", time1, time2) + } + }, + }, { + desc: "bytes set by all benchmarks", + f: func(b *B) { + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + b.Run("", func(b *B) { b.SetBytes(10); work(b) }) + if b.result.Bytes != 20 { + t.Errorf("bytes: got: %d; want 20", b.result.Bytes) + } + }, + }, { + desc: "bytes set by some benchmarks", + // In this case the bytes result is meaningless, so it must be 0. 
+		f: func(b *B) {
+			b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+			b.Run("", func(b *B) { work(b) })
+			b.Run("", func(b *B) { b.SetBytes(10); work(b) })
+			if b.result.Bytes != 0 {
+				t.Errorf("bytes: got: %d; want 0", b.result.Bytes)
+			}
+		},
+	}, {
+		desc:   "failure carried over to root",
+		failed: true,
+		output: "--- FAIL: root",
+		f:      func(b *B) { b.Fail() },
+	}, {
+		desc:   "skipping without message, chatty",
+		chatty: true,
+		output: "--- SKIP: root",
+		f:      func(b *B) { b.SkipNow() },
+	}, {
+		desc:   "chatty with recursion",
+		chatty: true,
+		f: func(b *B) {
+			b.Run("", func(b *B) {
+				b.Run("", func(b *B) {})
+			})
+		},
+	}, {
+		desc: "skipping without message, not chatty",
+		f:    func(b *B) { b.SkipNow() },
+	}, {
+		desc:   "skipping after error",
+		failed: true,
+		output: `
+--- FAIL: root
+    sub_test.go:NNN: an error
+    sub_test.go:NNN: skipped`,
+		f: func(b *B) {
+			b.Error("an error")
+			b.Skip("skipped")
+		},
+	}, {
+		desc: "memory allocation",
+		f: func(b *B) {
+			const bufSize = 256
+			alloc := func(b *B) {
+				var buf [bufSize]byte
+				for i := 0; i < b.N; i++ {
+					_ = append([]byte(nil), buf[:]...)
+				}
+			}
+			b.Run("", func(b *B) {
+				alloc(b)
+				b.ReportAllocs()
+			})
+			b.Run("", func(b *B) {
+				alloc(b)
+				b.ReportAllocs()
+			})
+			// runtime.MemStats sometimes reports more allocations than the
+			// benchmark is responsible for. Luckily the point of this test is
+			// to ensure that the results are not underreported, so we can
+			// simply verify the lower bound.
+			if got := b.result.MemAllocs; got < 2 {
+				t.Errorf("MemAllocs was %v; want 2", got)
+			}
+			if got := b.result.MemBytes; got < 2*bufSize {
+				t.Errorf("MemBytes was %v; want %v", got, 2*bufSize)
+			}
+		},
+	}, {
+		desc: "cleanup is called",
+		f: func(b *B) {
+			var calls, cleanups, innerCalls, innerCleanups int
+			b.Run("", func(b *B) {
+				calls++
+				b.Cleanup(func() {
+					cleanups++
+				})
+				b.Run("", func(b *B) {
+					b.Cleanup(func() {
+						innerCleanups++
+					})
+					innerCalls++
+				})
+				work(b)
+			})
+			if calls == 0 || calls != cleanups {
+				t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
+			}
+			if innerCalls == 0 || innerCalls != innerCleanups {
+				t.Errorf("mismatched inner cleanups; got %d want %d", innerCleanups, innerCalls)
+			}
+		},
+	}, {
+		desc:   "cleanup is called on failure",
+		failed: true,
+		f: func(b *B) {
+			var calls, cleanups int
+			b.Run("", func(b *B) {
+				calls++
+				b.Cleanup(func() {
+					cleanups++
+				})
+				b.Fatalf("failure")
+			})
+			if calls == 0 || calls != cleanups {
+				t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
+			}
+		},
+	}}
+	hideStdoutForTesting = true
+	defer func() {
+		hideStdoutForTesting = false
+	}()
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *T) {
+			var ok bool
+			buf := &strings.Builder{}
+			// This is almost like the Benchmark function, except that we override
+			// the benchtime and catch the failure result of the subbenchmark.
+			root := &B{
+				common: common{
+					signal: make(chan bool),
+					name:   "root",
+					w:      buf,
+				},
+				benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
+ benchTime: durationOrCountFlag{d: 1 * time.Microsecond}, + } + if tc.chatty { + root.chatty = newChattyPrinter(root.w) + } + root.runN(1) + if ok != !tc.failed { + t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed) + } + if !ok != root.Failed() { + t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed()) + } + // All tests are run as subtests + if root.result.N != 1 { + t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N) + } + got := strings.TrimSpace(buf.String()) + want := strings.TrimSpace(tc.output) + re := makeRegexp(want) + if ok, err := regexp.MatchString(re, got); !ok || err != nil { + t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want) + } + }) + } +} + +func makeRegexp(s string) string { + s = regexp.QuoteMeta(s) + s = strings.ReplaceAll(s, "^V", "\x16") + s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d\d?:`) + s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`) + return s +} + +func TestBenchmarkOutput(t *T) { + // Ensure Benchmark initialized common.w by invoking it with an error and + // normal case. + Benchmark(func(b *B) { b.Error("do not print this output") }) + Benchmark(func(b *B) {}) +} + +func TestBenchmarkStartsFrom1(t *T) { + var first = true + Benchmark(func(b *B) { + if first && b.N != 1 { + panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N)) + } + first = false + }) +} + +func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) { + var first = true + Benchmark(func(b *B) { + if first && (b.startAllocs == 0 || b.startBytes == 0) { + panic("ReadMemStats not called before first run") + } + first = false + }) +} + +type funcWriter struct { + write func([]byte) (int, error) +} + +func (fw *funcWriter) Write(b []byte) (int, error) { + return fw.write(b) +} + +func TestRacyOutput(t *T) { + var runs int32 // The number of running Writes + var races int32 // Incremented for each race detected + raceDetector := func(b []byte) (int, error) { + // Check if some other goroutine is concurrently calling Write. + if atomic.LoadInt32(&runs) > 0 { + atomic.AddInt32(&races, 1) // Race detected! + } + atomic.AddInt32(&runs, 1) + defer atomic.AddInt32(&runs, -1) + runtime.Gosched() // Increase probability of a race + return len(b), nil + } + + root := &T{ + common: common{w: &funcWriter{raceDetector}}, + tstate: newTestState(1, allMatcher()), + } + root.chatty = newChattyPrinter(root.w) + root.Run("", func(t *T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + t.Run(fmt.Sprint(i), func(t *T) { + t.Logf("testing run %d", i) + }) + }(i) + } + wg.Wait() + }) + + if races > 0 { + t.Errorf("detected %d racy Writes", races) + } +} + +// The late log message did not include the test name. Issue 29388. +func TestLogAfterComplete(t *T) { + tstate := newTestState(1, allMatcher()) + var buf bytes.Buffer + t1 := &T{ + common: common{ + // Use a buffered channel so that tRunner can write + // to it although nothing is reading from it. 
+ signal: make(chan bool, 1), + w: &buf, + }, + tstate: tstate, + } + + c1 := make(chan bool) + c2 := make(chan string) + tRunner(t1, func(t *T) { + t.Run("TestLateLog", func(t *T) { + go func() { + defer close(c2) + defer func() { + p := recover() + if p == nil { + c2 <- "subtest did not panic" + return + } + s, ok := p.(string) + if !ok { + c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p) + return + } + const want = "Log in goroutine after TestLateLog has completed: log after test" + if !strings.Contains(s, want) { + c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want) + } + }() + + <-c1 + t.Log("log after test") + }() + }) + }) + close(c1) + + if s := <-c2; s != "" { + t.Error(s) + } +} + +func TestBenchmark(t *T) { + if Short() { + t.Skip("skipping in short mode") + } + res := Benchmark(func(b *B) { + for i := 0; i < 5; i++ { + b.Run("", func(b *B) { + for i := 0; i < b.N; i++ { + time.Sleep(time.Millisecond) + } + }) + } + }) + if res.NsPerOp() < 4000000 { + t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp())) + } +} + +func TestCleanup(t *T) { + var cleanups []int + t.Run("test", func(t *T) { + t.Cleanup(func() { cleanups = append(cleanups, 1) }) + t.Cleanup(func() { cleanups = append(cleanups, 2) }) + }) + if got, want := cleanups, []int{2, 1}; !slices.Equal(got, want) { + t.Errorf("unexpected cleanup record; got %v want %v", got, want) + } +} + +func TestConcurrentCleanup(t *T) { + cleanups := 0 + t.Run("test", func(t *T) { + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + i := i + go func() { + t.Cleanup(func() { + // Although the calls to Cleanup are concurrent, the functions passed + // to Cleanup should be called sequentially, in some nondeterministic + // order based on when the Cleanup calls happened to be scheduled. + // So these assignments to the cleanups variable should not race. 
+					cleanups |= 1 << i
+				})
+				wg.Done()
+			}()
+		}
+		wg.Wait()
+	})
+	if cleanups != 1|2 {
+		t.Errorf("unexpected cleanup; got %d want 3", cleanups)
+	}
+}
+
+func TestCleanupCalledEvenAfterGoexit(t *T) {
+	cleanups := 0
+	t.Run("test", func(t *T) {
+		t.Cleanup(func() {
+			cleanups++
+		})
+		t.Cleanup(func() {
+			runtime.Goexit()
+		})
+	})
+	if cleanups != 1 {
+		t.Errorf("unexpected cleanup count; got %d want 1", cleanups)
+	}
+}
+
+func TestRunCleanup(t *T) {
+	outerCleanup := 0
+	innerCleanup := 0
+	t.Run("test", func(t *T) {
+		t.Cleanup(func() { outerCleanup++ })
+		t.Run("x", func(t *T) {
+			t.Cleanup(func() { innerCleanup++ })
+		})
+	})
+	if innerCleanup != 1 {
+		t.Errorf("unexpected inner cleanup count; got %d want 1", innerCleanup)
+	}
+	if outerCleanup != 1 {
+		t.Errorf("unexpected outer cleanup count; got %d want 1", outerCleanup)
+	}
+}
+
+func TestCleanupParallelSubtests(t *T) {
+	ranCleanup := 0
+	t.Run("test", func(t *T) {
+		t.Cleanup(func() { ranCleanup++ })
+		t.Run("x", func(t *T) {
+			t.Parallel()
+			if ranCleanup > 0 {
+				t.Error("outer cleanup ran before parallel subtest")
+			}
+		})
+	})
+	if ranCleanup != 1 {
+		t.Errorf("unexpected cleanup count; got %d want 1", ranCleanup)
+	}
+}
+
+func TestNestedCleanup(t *T) {
+	ranCleanup := 0
+	t.Run("test", func(t *T) {
+		t.Cleanup(func() {
+			if ranCleanup != 2 {
+				t.Errorf("unexpected cleanup count in first cleanup: got %d want 2", ranCleanup)
+			}
+			ranCleanup++
+		})
+		t.Cleanup(func() {
+			if ranCleanup != 0 {
+				t.Errorf("unexpected cleanup count in second cleanup: got %d want 0", ranCleanup)
+			}
+			ranCleanup++
+			t.Cleanup(func() {
+				if ranCleanup != 1 {
+					t.Errorf("unexpected cleanup count in nested cleanup: got %d want 1", ranCleanup)
+				}
+				ranCleanup++
+			})
+		})
+	})
+	if ranCleanup != 3 {
+		t.Errorf("unexpected cleanup count: got %d want 3", ranCleanup)
+	}
+}
diff --git a/testing/testing/synctest/context_example_test.go b/testing/testing/synctest/context_example_test.go
new file mode 100644
index 0000000..5f7205e
--- /dev/null
+++ b/testing/testing/synctest/context_example_test.go
@@ -0,0 +1,78 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.synctest
+
+package synctest_test
+
+import (
+	"context"
+	"fmt"
+	"testing/synctest"
+	"time"
+)
+
+// This example demonstrates testing the context.AfterFunc function.
+//
+// AfterFunc registers a function to execute in a new goroutine
+// after a context is canceled.
+//
+// The test verifies that the function is not run before the context is canceled,
+// and is run after the context is canceled.
+func Example_contextAfterFunc() {
+	synctest.Run(func() {
+		// Create a context.Context which can be canceled.
+		ctx, cancel := context.WithCancel(context.Background())
+
+		// context.AfterFunc registers a function to be called
+		// when a context is canceled.
+		afterFuncCalled := false
+		context.AfterFunc(ctx, func() {
+			afterFuncCalled = true
+		})
+
+		// The context has not been canceled, so the AfterFunc is not called.
+		synctest.Wait()
+		fmt.Printf("before context is canceled: afterFuncCalled=%v\n", afterFuncCalled)
+
+		// Cancel the context and wait for the AfterFunc to finish executing.
+		// Verify that the AfterFunc ran.
+		cancel()
+		synctest.Wait()
+		fmt.Printf("after context is canceled: afterFuncCalled=%v\n", afterFuncCalled)
+
+		// Output:
+		// before context is canceled: afterFuncCalled=false
+		// after context is canceled: afterFuncCalled=true
+	})
+}
+
+// This example demonstrates testing the context.WithTimeout function.
+//
+// WithTimeout creates a context which is canceled after a timeout.
+//
+// The test verifies that the context is not canceled before the timeout expires,
+// and is canceled after the timeout expires.
+func Example_contextWithTimeout() {
+	synctest.Run(func() {
+		// Create a context.Context which is canceled after a timeout.
+		const timeout = 5 * time.Second
+		ctx, cancel := context.WithTimeout(context.Background(), timeout)
+		defer cancel()
+
+		// Wait just less than the timeout.
+		time.Sleep(timeout - time.Nanosecond)
+		synctest.Wait()
+		fmt.Printf("before timeout: ctx.Err() = %v\n", ctx.Err())
+
+		// Wait the rest of the way until the timeout.
+		time.Sleep(time.Nanosecond)
+		synctest.Wait()
+		fmt.Printf("after timeout: ctx.Err() = %v\n", ctx.Err())
+
+		// Output:
+		// before timeout: ctx.Err() = <nil>
+		// after timeout: ctx.Err() = context deadline exceeded
+	})
+}
diff --git a/testing/testing/synctest/synctest.go b/testing/testing/synctest/synctest.go
new file mode 100644
index 0000000..afae459
--- /dev/null
+++ b/testing/testing/synctest/synctest.go
@@ -0,0 +1,67 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.synctest
+
+// Package synctest provides support for testing concurrent code.
+//
+// This package only exists when using Go compiled with GOEXPERIMENT=synctest.
+// It is experimental, and not subject to the Go 1 compatibility promise.
+package synctest
+
+import (
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/synctest"
+)
+
+// Run executes f in a new goroutine.
+//
+// The new goroutine and any goroutines transitively started by it form
+// an isolated "bubble".
+// Run waits for all goroutines in the bubble to exit before returning.
+//
+// Goroutines in the bubble use a synthetic time implementation.
+// The initial time is midnight UTC 2000-01-01.
+//
+// Time advances when every goroutine in the bubble is blocked.
+// For example, a call to time.Sleep will block until all other
+// goroutines are blocked and return after the bubble's clock has
+// advanced. See [Wait] for the specific definition of blocked.
+//
+// If every goroutine is blocked and there are no timers scheduled,
+// Run panics.
+//
+// Channels, time.Timers, and time.Tickers created within the bubble
+// are associated with it. Operating on a bubbled channel, timer, or ticker
+// from outside the bubble panics.
+func Run(f func()) {
+	synctest.Run(f)
+}
+
+// Wait blocks until every goroutine within the current bubble,
+// other than the current goroutine, is durably blocked.
+// It panics if called from a non-bubbled goroutine,
+// or if two goroutines in the same bubble call Wait at the same time.
+//
+// A goroutine is durably blocked if it can only be unblocked by another
+// goroutine in its bubble. The following operations durably block
+// a goroutine:
+//   - a send or receive on a channel from within the bubble
+//   - a select statement where every case is a channel within the bubble
+//   - sync.Cond.Wait
+//   - time.Sleep
+//
+// A goroutine executing a system call or waiting for an external event
+// such as a network operation is not durably blocked.
+// For example, a goroutine blocked reading from a network connection
+// is not durably blocked even if no data is currently available on the
+// connection, because it may be unblocked by data written from outside
+// the bubble or may be in the process of receiving data from a kernel
+// network buffer.
+//
+// A goroutine is not durably blocked when blocked on a send or receive
+// on a channel that was not created within its bubble, because it may
+// be unblocked by a channel receive or send from outside its bubble.
func Wait() {
+	synctest.Wait()
+}
diff --git a/testing/testing/testing.go b/testing/testing/testing.go
new file mode 100644
index 0000000..34b5b66
--- /dev/null
+++ b/testing/testing/testing.go
@@ -0,0 +1,2531 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testing provides support for automated testing of Go packages.
+// It is intended to be used in concert with the "go test" command, which automates
+// execution of any function of the form
+//
+//	func TestXxx(*testing.T)
+//
+// where Xxx does not start with a lowercase letter. The function name
+// serves to identify the test routine.
+//
+// Within these functions, use the Error, Fail or related methods to signal failure.
+//
+// To write a new test suite, create a file that
+// contains the TestXxx functions as described here,
+// and give that file a name ending in "_test.go".
+// The file will be excluded from regular
+// package builds but will be included when the "go test" command is run.
+//
+// The test file can be in the same package as the one being tested,
+// or in a corresponding package with the suffix "_test".
+//
+// If the test file is in the same package, it may refer to unexported
+// identifiers within the package, as in this example:
+//
+//	package abs
+//
+//	import "testing"
+//
+//	func TestAbs(t *testing.T) {
+//	    got := Abs(-1)
+//	    if got != 1 {
+//	        t.Errorf("Abs(-1) = %d; want 1", got)
+//	    }
+//	}
+//
+// If the file is in a separate "_test" package, the package being tested
+// must be imported explicitly and only its exported identifiers may be used.
+// This is known as "black box" testing.
+//
+//	package abs_test
+//
+//	import (
+//		"testing"
+//
+//		"path_to_pkg/abs"
+//	)
+//
+//	func TestAbs(t *testing.T) {
+//	    got := abs.Abs(-1)
+//	    if got != 1 {
+//	        t.Errorf("Abs(-1) = %d; want 1", got)
+//	    }
+//	}
+//
+// For more detail, run "go help test" and "go help testflag".
+//
+// # Benchmarks
+//
+// Functions of the form
+//
+//	func BenchmarkXxx(*testing.B)
+//
+// are considered benchmarks, and are executed by the "go test" command when
+// its -bench flag is provided. Benchmarks are run sequentially.
+//
+// For a description of the testing flags, see
+// https://golang.org/cmd/go/#hdr-Testing_flags.
+// +// A sample benchmark function looks like this: +// +// func BenchmarkRandInt(b *testing.B) { +// for b.Loop() { +// rand.Int() +// } +// } +// +// The output +// +// BenchmarkRandInt-8 68453040 17.8 ns/op +// +// means that the body of the loop ran 68453040 times at a speed of 17.8 ns per loop. +// +// Only the body of the loop is timed, so benchmarks may do expensive +// setup before calling b.Loop, which will not be counted toward the +// benchmark measurement: +// +// func BenchmarkBigLen(b *testing.B) { +// big := NewBig() +// for b.Loop() { +// big.Len() +// } +// } +// +// If a benchmark needs to test performance in a parallel setting, it may use +// the RunParallel helper function; such benchmarks are intended to be used with +// the go test -cpu flag: +// +// func BenchmarkTemplateParallel(b *testing.B) { +// templ := template.Must(template.New("test").Parse("Hello, {{.}}!")) +// b.RunParallel(func(pb *testing.PB) { +// var buf bytes.Buffer +// for pb.Next() { +// buf.Reset() +// templ.Execute(&buf, "World") +// } +// }) +// } +// +// A detailed specification of the benchmark results format is given +// in https://golang.org/design/14313-benchmark-format. +// +// There are standard tools for working with benchmark results at +// https://golang.org/x/perf/cmd. +// In particular, https://golang.org/x/perf/cmd/benchstat performs +// statistically robust A/B comparisons. +// +// # b.N-style benchmarks +// +// Prior to the introduction of [B.Loop], benchmarks were written in a +// different style using B.N. For example: +// +// func BenchmarkRandInt(b *testing.B) { +// for range b.N { +// rand.Int() +// } +// } +// +// In this style of benchmark, the benchmark function must run +// the target code b.N times. The benchmark function is called +// multiple times with b.N adjusted until the benchmark function +// lasts long enough to be timed reliably. This also means any setup +// done before the loop may be run several times. +// +// If a benchmark needs some expensive setup before running, the timer +// should be explicitly reset: +// +// func BenchmarkBigLen(b *testing.B) { +// big := NewBig() +// b.ResetTimer() +// for range b.N { +// big.Len() +// } +// } +// +// New benchmarks should prefer using [B.Loop], which is more robust +// and more efficient. +// +// # Examples +// +// The package also runs and verifies example code. Example functions may +// include a concluding line comment that begins with "Output:" and is compared with +// the standard output of the function when the tests are run. (The comparison +// ignores leading and trailing space.) These are examples of an example: +// +// func ExampleHello() { +// fmt.Println("hello") +// // Output: hello +// } +// +// func ExampleSalutations() { +// fmt.Println("hello, and") +// fmt.Println("goodbye") +// // Output: +// // hello, and +// // goodbye +// } +// +// The comment prefix "Unordered output:" is like "Output:", but matches any +// line order: +// +// func ExamplePerm() { +// for _, value := range Perm(5) { +// fmt.Println(value) +// } +// // Unordered output: 4 +// // 2 +// // 1 +// // 3 +// // 0 +// } +// +// Example functions without output comments are compiled but not executed. +// +// The naming convention to declare examples for the package, a function F, a type T and +// method M on type T are: +// +// func Example() { ... } +// func ExampleF() { ... } +// func ExampleT() { ... } +// func ExampleT_M() { ... 
}
+//
+// Multiple example functions for a package/type/function/method may be provided by
+// appending a distinct suffix to the name. The suffix must start with a
+// lower-case letter.
+//
+//	func Example_suffix() { ... }
+//	func ExampleF_suffix() { ... }
+//	func ExampleT_suffix() { ... }
+//	func ExampleT_M_suffix() { ... }
+//
+// The entire test file is presented as the example when it contains a single
+// example function, at least one other function, type, variable, or constant
+// declaration, and no test or benchmark functions.
+//
+// # Fuzzing
+//
+// 'go test' and the testing package support fuzzing, a testing technique where
+// a function is called with randomly generated inputs to find bugs not
+// anticipated by unit tests.
+//
+// Functions of the form
+//
+//	func FuzzXxx(*testing.F)
+//
+// are considered fuzz tests.
+//
+// For example:
+//
+//	func FuzzHex(f *testing.F) {
+//	    for _, seed := range [][]byte{{}, {0}, {9}, {0xa}, {0xf}, {1, 2, 3, 4}} {
+//	        f.Add(seed)
+//	    }
+//	    f.Fuzz(func(t *testing.T, in []byte) {
+//	        enc := hex.EncodeToString(in)
+//	        out, err := hex.DecodeString(enc)
+//	        if err != nil {
+//	            t.Fatalf("%v: decode: %v", in, err)
+//	        }
+//	        if !bytes.Equal(in, out) {
+//	            t.Fatalf("%v: not equal after round trip: %v", in, out)
+//	        }
+//	    })
+//	}
+//
+// A fuzz test maintains a seed corpus, or a set of inputs which are run by
+// default, and can seed input generation. Seed inputs may be registered by
+// calling (*F).Add or by storing files in the directory testdata/fuzz/<Name>
+// (where <Name> is the name of the fuzz test) within the package containing
+// the fuzz test. Seed inputs are optional, but the fuzzing engine may find
+// bugs more efficiently when provided with a set of small seed inputs with good
+// code coverage. These seed inputs can also serve as regression tests for bugs
+// identified through fuzzing.
+//
+// The function passed to (*F).Fuzz within the fuzz test is considered the fuzz
+// target. A fuzz target must accept a *T parameter, followed by one or more
+// parameters for random inputs. The types of arguments passed to (*F).Add must
+// be identical to the types of these parameters. The fuzz target may signal
+// that it's found a problem the same way tests do: by calling T.Fail (or any
+// method that calls it like T.Error or T.Fatal) or by panicking.
+//
+// When fuzzing is enabled (by setting the -fuzz flag to a regular expression
+// that matches a specific fuzz test), the fuzz target is called with arguments
+// generated by repeatedly making random changes to the seed inputs. On
+// supported platforms, 'go test' compiles the test executable with fuzzing
+// coverage instrumentation. The fuzzing engine uses that instrumentation to
+// find and cache inputs that expand coverage, increasing the likelihood of
+// finding bugs. If the fuzz target fails for a given input, the fuzzing engine
+// writes the inputs that caused the failure to a file in the directory
+// testdata/fuzz/<Name> within the package directory. This file later serves as
+// a seed input. If the file can't be written at that location (for example,
+// because the directory is read-only), the fuzzing engine writes the file to
+// the fuzz cache directory within the build cache instead.
+//
+// When fuzzing is disabled, the fuzz target is called with the seed inputs
+// registered with F.Add and seed inputs from testdata/fuzz/<Name>. In this
+// mode, the fuzz test acts much like a regular test, with subtests started
+// with F.Fuzz instead of T.Run.
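+//
+// As a concrete illustration of the corpus file encoding (the file name and
+// value below are hypothetical), a seed input for the FuzzHex example above
+// could be stored in testdata/fuzz/FuzzHex/seed-1 with the contents:
+//
+//	go test fuzz v1
+//	[]byte("hello")
+//
+// The first line identifies the encoding version; each following line gives
+// one argument to the fuzz target as a Go literal.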
+//
+// See https://go.dev/doc/fuzz for documentation about fuzzing.
+//
+// # Skipping
+//
+// Tests or benchmarks may be skipped at run time with a call to
+// the Skip method of *T or *B:
+//
+//	func TestTimeConsuming(t *testing.T) {
+//	    if testing.Short() {
+//	        t.Skip("skipping test in short mode.")
+//	    }
+//	    ...
+//	}
+//
+// The Skip method of *T can be used in a fuzz target if the input is invalid,
+// but should not be considered a failing input. For example:
+//
+//	func FuzzJSONMarshaling(f *testing.F) {
+//	    f.Fuzz(func(t *testing.T, b []byte) {
+//	        var v interface{}
+//	        if err := json.Unmarshal(b, &v); err != nil {
+//	            t.Skip()
+//	        }
+//	        if _, err := json.Marshal(v); err != nil {
+//	            t.Errorf("Marshal: %v", err)
+//	        }
+//	    })
+//	}
+//
+// # Subtests and Sub-benchmarks
+//
+// The Run methods of T and B allow defining subtests and sub-benchmarks,
+// without having to define separate functions for each. This enables uses
+// like table-driven benchmarks and creating hierarchical tests.
+// It also provides a way to share common setup and tear-down code:
+//
+//	func TestFoo(t *testing.T) {
+//	    // <setup code>
+//	    t.Run("A=1", func(t *testing.T) { ... })
+//	    t.Run("A=2", func(t *testing.T) { ... })
+//	    t.Run("B=1", func(t *testing.T) { ... })
+//	    // <tear-down code>
+//	}
+//
+// Each subtest and sub-benchmark has a unique name: the combination of the name
+// of the top-level test and the sequence of names passed to Run, separated by
+// slashes, with an optional trailing sequence number for disambiguation.
+//
+// The argument to the -run, -bench, and -fuzz command-line flags is an unanchored regular
+// expression that matches the test's name. For tests with multiple slash-separated
+// elements, such as subtests, the argument is itself slash-separated, with
+// expressions matching each name element in turn. Because it is unanchored, an
+// empty expression matches any string.
+// For example, using "matching" to mean "whose name contains":
+//
+//	go test -run ''        # Run all tests.
+//	go test -run Foo       # Run top-level tests matching "Foo", such as "TestFooBar".
+//	go test -run Foo/A=    # For top-level tests matching "Foo", run subtests matching "A=".
+//	go test -run /A=1      # For all top-level tests, run subtests matching "A=1".
+//	go test -fuzz FuzzFoo  # Fuzz the target matching "FuzzFoo"
+//
+// The -run argument can also be used to run a specific value in the seed
+// corpus, for debugging. For example:
+//
+//	go test -run=FuzzFoo/9ddb952d9814
+//
+// The -fuzz and -run flags can both be set, in order to fuzz a target but
+// skip the execution of all other tests.
+//
+// Subtests can also be used to control parallelism. A parent test will only
+// complete once all of its subtests complete. In this example, all tests are
+// run in parallel with each other, and only with each other, regardless of
+// other top-level tests that may be defined:
+//
+//	func TestGroupedParallel(t *testing.T) {
+//	    for _, tc := range tests {
+//	        tc := tc // capture range variable
+//	        t.Run(tc.Name, func(t *testing.T) {
+//	            t.Parallel()
+//	            ...
+//	        })
+//	    }
+//	}
+//
+// Run does not return until parallel subtests have completed, providing a way
+// to clean up after a group of parallel tests:
+//
+//	func TestTeardownParallel(t *testing.T) {
+//	    // This Run will not return until the parallel tests finish.
+//	    t.Run("group", func(t *testing.T) {
+//	        t.Run("Test1", parallelTest1)
+//	        t.Run("Test2", parallelTest2)
+//	        t.Run("Test3", parallelTest3)
+//	    })
+//	    // <tear-down code>
+//	}
+//
+// # Main
+//
+// It is sometimes necessary for a test or benchmark program to do extra setup or teardown
+// before or after it executes. It is also sometimes necessary to control
+// which code runs on the main thread. To support these and other cases,
+// if a test file contains a function:
+//
+//	func TestMain(m *testing.M)
+//
+// then the generated test will call TestMain(m) instead of running the tests or benchmarks
+// directly. TestMain runs in the main goroutine and can do whatever setup
+// and teardown is necessary around a call to m.Run. m.Run will return an exit
+// code that may be passed to os.Exit. If TestMain returns, the test wrapper
+// will pass the result of m.Run to os.Exit itself.
+//
+// When TestMain is called, flag.Parse has not been run. If TestMain depends on
+// command-line flags, including those of the testing package, it should call
+// flag.Parse explicitly. Command line flags are always parsed by the time test
+// or benchmark functions run.
+//
+// A simple implementation of TestMain is:
+//
+//	func TestMain(m *testing.M) {
+//		// call flag.Parse() here if TestMain uses flags
+//		m.Run()
+//	}
+//
+// TestMain is a low-level primitive and should not be necessary for casual
+// testing needs, where ordinary test functions suffice.
+package testing
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"runtime/debug"
+	"runtime/trace"
+	"slices"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/goexperiment"
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/race"
+)
+
+var initRan bool
+
+// Init registers testing flags. These flags are automatically registered by
+// the "go test" command before running test functions, so Init is only needed
+// when calling functions such as Benchmark without using "go test".
+//
+// Init is not safe to call concurrently. It has no effect if it was already called.
+func Init() {
+	if initRan {
+		return
+	}
+	initRan = true
+	// The short flag requests that tests run more quickly, but its functionality
+	// is provided by test writers themselves. The testing package is just its
+	// home. The all.bash installation script sets it to make installation more
+	// efficient, but by default the flag is off so a plain "go test" will do a
+	// full test of the package.
+	short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
+	// The failfast flag requests that test execution stop after the first test failure.
+	failFast = flag.Bool("test.failfast", false, "do not start new tests after the first test failure")
+
+	// The directory in which to create profile files and the like. When run from
+	// "go test", the binary always runs in the source directory for the package;
+	// this flag lets "go test" tell the binary to write the files in the directory where
+	// the "go test" command is run.
+	outputDir = flag.String("test.outputdir", "", "write profiles to `dir`")
+	// Report as tests are run; default is silent for success.
+ flag.Var(&chatty, "test.v", "verbose: print additional output") + count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times") + coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`") + gocoverdir = flag.String("test.gocoverdir", "", "write coverage intermediate files to this directory") + matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit") + match = flag.String("test.run", "", "run only tests and examples matching `regexp`") + skip = flag.String("test.skip", "", "do not list or run tests matching `regexp`") + memProfile = flag.String("test.memprofile", "", "write an allocation profile to `file`") + memProfileRate = flag.Int("test.memprofilerate", 0, "set memory allocation profiling `rate` (see runtime.MemProfileRate)") + cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to `file`") + blockProfile = flag.String("test.blockprofile", "", "write a goroutine blocking profile to `file`") + blockProfileRate = flag.Int("test.blockprofilerate", 1, "set blocking profile `rate` (see runtime.SetBlockProfileRate)") + mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution") + mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()") + panicOnExit0 = flag.Bool("test.paniconexit0", false, "panic on call to os.Exit(0)") + traceFile = flag.String("test.trace", "", "write an execution trace to `file`") + timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (default 0, timeout disabled)") + cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with") + parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel") + testlog = flag.String("test.testlogfile", "", "write test action log to `file` (for use only by cmd/go)") + shuffle = flag.String("test.shuffle", "off", "randomize the execution order of tests and benchmarks") + fullPath = flag.Bool("test.fullpath", false, "show full file names in error messages") + + initBenchmarkFlags() + initFuzzFlags() +} + +var ( + // Flags, registered during Init. + short *bool + failFast *bool + outputDir *string + chatty chattyFlag + count *uint + coverProfile *string + gocoverdir *string + matchList *string + match *string + skip *string + memProfile *string + memProfileRate *int + cpuProfile *string + blockProfile *string + blockProfileRate *int + mutexProfile *string + mutexProfileFraction *int + panicOnExit0 *bool + traceFile *string + timeout *time.Duration + cpuListStr *string + parallel *int + shuffle *string + testlog *string + fullPath *bool + + haveExamples bool // are there examples? 
+ + cpuList []int + testlogFile *os.File + + numFailed atomic.Uint32 // number of test failures + + running sync.Map // map[string]time.Time of running, unpaused tests +) + +type chattyFlag struct { + on bool // -v is set in some form + json bool // -v=test2json is set, to make output better for test2json +} + +func (*chattyFlag) IsBoolFlag() bool { return true } + +func (f *chattyFlag) Set(arg string) error { + switch arg { + default: + return fmt.Errorf("invalid flag -test.v=%s", arg) + case "true", "test2json": + f.on = true + f.json = arg == "test2json" + case "false": + f.on = false + f.json = false + } + return nil +} + +func (f *chattyFlag) String() string { + if f.json { + return "test2json" + } + if f.on { + return "true" + } + return "false" +} + +func (f *chattyFlag) Get() any { + if f.json { + return "test2json" + } + return f.on +} + +const marker = byte(0x16) // ^V for framing + +func (f *chattyFlag) prefix() string { + if f.json { + return string(marker) + } + return "" +} + +type chattyPrinter struct { + w io.Writer + lastNameMu sync.Mutex // guards lastName + lastName string // last printed test name in chatty mode + json bool // -v=json output mode +} + +func newChattyPrinter(w io.Writer) *chattyPrinter { + return &chattyPrinter{w: w, json: chatty.json} +} + +// prefix is like chatty.prefix but using p.json instead of chatty.json. +// Using p.json allows tests to check the json behavior without modifying +// the global variable. For convenience, we allow p == nil and treat +// that as not in json mode (because it's not chatty at all). +func (p *chattyPrinter) prefix() string { + if p != nil && p.json { + return string(marker) + } + return "" +} + +// Updatef prints a message about the status of the named test to w. +// +// The formatted message must include the test name itself. +func (p *chattyPrinter) Updatef(testName, format string, args ...any) { + p.lastNameMu.Lock() + defer p.lastNameMu.Unlock() + + // Since the message already implies an association with a specific new test, + // we don't need to check what the old test name was or log an extra NAME line + // for it. (We're updating it anyway, and the current message already includes + // the test name.) + p.lastName = testName + fmt.Fprintf(p.w, p.prefix()+format, args...) +} + +// Printf prints a message, generated by the named test, that does not +// necessarily mention that tests's name itself. +func (p *chattyPrinter) Printf(testName, format string, args ...any) { + p.lastNameMu.Lock() + defer p.lastNameMu.Unlock() + + if p.lastName == "" { + p.lastName = testName + } else if p.lastName != testName { + fmt.Fprintf(p.w, "%s=== NAME %s\n", p.prefix(), testName) + p.lastName = testName + } + + fmt.Fprintf(p.w, format, args...) +} + +// The maximum number of stack frames to go through when skipping helper functions for +// the purpose of decorating log messages. +const maxStackLen = 50 + +// common holds the elements common between T and B and +// captures common methods such as Errorf. +type common struct { + mu sync.RWMutex // guards this group of fields + output []byte // Output generated by test or benchmark. + w io.Writer // For flushToParent. + ran bool // Test or benchmark (or one of its subtests) was executed. + failed bool // Test or benchmark has failed. + skipped bool // Test or benchmark has been skipped. + done bool // Test is finished and all subtests have completed. 
+ helperPCs map[uintptr]struct{} // functions to be skipped when writing file/line info + helperNames map[string]struct{} // helperPCs converted to function names + cleanups []func() // optional functions to be called at the end of the test + cleanupName string // Name of the cleanup function. + cleanupPc []uintptr // The stack trace at the point where Cleanup was called. + finished bool // Test function has completed. + inFuzzFn bool // Whether the fuzz target, if this is one, is running. + + chatty *chattyPrinter // A copy of chattyPrinter, if the chatty flag is set. + bench bool // Whether the current test is a benchmark. + hasSub atomic.Bool // whether there are sub-benchmarks. + cleanupStarted atomic.Bool // Registered cleanup callbacks have started to execute + runner string // Function name of tRunner running the test. + isParallel bool // Whether the test is parallel. + + parent *common + level int // Nesting depth of test or benchmark. + creator []uintptr // If level > 0, the stack trace at the point where the parent called t.Run. + name string // Name of test or benchmark. + start highPrecisionTime // Time test or benchmark started + duration time.Duration + barrier chan bool // To signal parallel subtests they may start. Nil when T.Parallel is not present (B) or not usable (when fuzzing). + signal chan bool // To signal a test is done. + sub []*T // Queue of subtests to be run in parallel. + + lastRaceErrors atomic.Int64 // Max value of race.Errors seen during the test or its subtests. + raceErrorLogged atomic.Bool + + tempDirMu sync.Mutex + tempDir string + tempDirErr error + tempDirSeq int32 + + ctx context.Context + cancelCtx context.CancelFunc + + codspeedTimePerRoundNs []time.Duration + codspeedItersPerRound []int64 +} + +// Short reports whether the -test.short flag is set. +func Short() bool { + if short == nil { + panic("testing: Short called before Init") + } + // Catch code that calls this from TestMain without first calling flag.Parse. + if !flag.Parsed() { + panic("testing: Short called before Parse") + } + + return *short +} + +// testBinary is set by cmd/go to "1" if this is a binary built by "go test". +// The value is set to "1" by a -X option to cmd/link. We assume that +// because this is possible, the compiler will not optimize testBinary +// into a constant on the basis that it is an unexported package-scope +// variable that is never changed. If the compiler ever starts implementing +// such an optimization, we will need some technique to mark this variable +// as "changed by a cmd/link -X option". +var testBinary = "0" + +// Testing reports whether the current code is being run in a test. +// This will report true in programs created by "go test", +// false in programs created by "go build". +func Testing() bool { + return testBinary == "1" +} + +// CoverMode reports what the test coverage mode is set to. The +// values are "set", "count", or "atomic". The return value will be +// empty if test coverage is not enabled. +func CoverMode() string { + if goexperiment.CoverageRedesign { + return cover2.mode + } + return cover.Mode +} + +// Verbose reports whether the -test.v flag is set. +func Verbose() bool { + // Same as in Short. 
+ if !flag.Parsed() { + panic("testing: Verbose called before Parse") + } + return chatty.on +} + +func (c *common) checkFuzzFn(name string) { + if c.inFuzzFn { + panic(fmt.Sprintf("testing: f.%s was called inside the fuzz target, use t.%s instead", name, name)) + } +} + +// frameSkip searches, starting after skip frames, for the first caller frame +// in a function not marked as a helper and returns that frame. +// The search stops if it finds a tRunner function that +// was the entry point into the test and the test is not a subtest. +// This function must be called with c.mu held. +func (c *common) frameSkip(skip int) runtime.Frame { + // If the search continues into the parent test, we'll have to hold + // its mu temporarily. If we then return, we need to unlock it. + shouldUnlock := false + defer func() { + if shouldUnlock { + c.mu.Unlock() + } + }() + var pc [maxStackLen]uintptr + // Skip two extra frames to account for this function + // and runtime.Callers itself. + n := runtime.Callers(skip+2, pc[:]) + if n == 0 { + panic("testing: zero callers found") + } + frames := runtime.CallersFrames(pc[:n]) + var firstFrame, prevFrame, frame runtime.Frame + for more := true; more; prevFrame = frame { + frame, more = frames.Next() + if frame.Function == "runtime.gopanic" { + continue + } + if frame.Function == c.cleanupName { + frames = runtime.CallersFrames(c.cleanupPc) + continue + } + if firstFrame.PC == 0 { + firstFrame = frame + } + if frame.Function == c.runner { + // We've gone up all the way to the tRunner calling + // the test function (so the user must have + // called tb.Helper from inside that test function). + // If this is a top-level test, only skip up to the test function itself. + // If we're in a subtest, continue searching in the parent test, + // starting from the point of the call to Run which created this subtest. + if c.level > 1 { + frames = runtime.CallersFrames(c.creator) + parent := c.parent + // We're no longer looking at the current c after this point, + // so we should unlock its mu, unless it's the original receiver, + // in which case our caller doesn't expect us to do that. + if shouldUnlock { + c.mu.Unlock() + } + c = parent + // Remember to unlock c.mu when we no longer need it, either + // because we went up another nesting level, or because we + // returned. + shouldUnlock = true + c.mu.Lock() + continue + } + return prevFrame + } + // If more helper PCs have been added since we last did the conversion + if c.helperNames == nil { + c.helperNames = make(map[string]struct{}) + for pc := range c.helperPCs { + c.helperNames[pcToName(pc)] = struct{}{} + } + } + if _, ok := c.helperNames[frame.Function]; !ok { + // Found a frame that wasn't inside a helper function. + return frame + } + } + return firstFrame +} + +// decorate prefixes the string with the file and line of the call site +// and inserts the final newline if needed and indentation spaces for formatting. +// This function must be called with c.mu held. +func (c *common) decorate(s string, skip int) string { + frame := c.frameSkip(skip) + file := frame.File + line := frame.Line + if file != "" { + if *fullPath { + // If relative path, truncate file name at last file name separator. + } else if index := strings.LastIndexAny(file, `/\`); index >= 0 { + file = file[index+1:] + } + } else { + file = "???" + } + if line == 0 { + line = 1 + } + buf := new(strings.Builder) + // Every line is indented at least 4 spaces. 
+ buf.WriteString(" ") + fmt.Fprintf(buf, "%s:%d: ", file, line) + lines := strings.Split(s, "\n") + if l := len(lines); l > 1 && lines[l-1] == "" { + lines = lines[:l-1] + } + for i, line := range lines { + if i > 0 { + // Second and subsequent lines are indented an additional 4 spaces. + buf.WriteString("\n ") + } + buf.WriteString(line) + } + buf.WriteByte('\n') + return buf.String() +} + +// flushToParent writes c.output to the parent after first writing the header +// with the given format and arguments. +func (c *common) flushToParent(testName, format string, args ...any) { + p := c.parent + p.mu.Lock() + defer p.mu.Unlock() + + c.mu.Lock() + defer c.mu.Unlock() + + if len(c.output) > 0 { + // Add the current c.output to the print, + // and then arrange for the print to replace c.output. + // (This displays the logged output after the --- FAIL line.) + format += "%s" + args = append(args[:len(args):len(args)], c.output) + c.output = c.output[:0] + } + + if c.chatty != nil && (p.w == c.chatty.w || c.chatty.json) { + // We're flushing to the actual output, so track that this output is + // associated with a specific test (and, specifically, that the next output + // is *not* associated with that test). + // + // Moreover, if c.output is non-empty it is important that this write be + // atomic with respect to the output of other tests, so that we don't end up + // with confusing '=== NAME' lines in the middle of our '--- PASS' block. + // Neither humans nor cmd/test2json can parse those easily. + // (See https://go.dev/issue/40771.) + // + // If test2json is used, we never flush to parent tests, + // so that the json stream shows subtests as they finish. + // (See https://go.dev/issue/29811.) + c.chatty.Updatef(testName, format, args...) + } else { + // We're flushing to the output buffer of the parent test, which will + // itself follow a test-name header when it is finally flushed to stdout. + fmt.Fprintf(p.w, c.chatty.prefix()+format, args...) + } +} + +type indenter struct { + c *common +} + +func (w indenter) Write(b []byte) (n int, err error) { + n = len(b) + for len(b) > 0 { + end := bytes.IndexByte(b, '\n') + if end == -1 { + end = len(b) + } else { + end++ + } + // An indent of 4 spaces will neatly align the dashes with the status + // indicator of the parent. + line := b[:end] + if line[0] == marker { + w.c.output = append(w.c.output, marker) + line = line[1:] + } + const indent = " " + w.c.output = append(w.c.output, indent...) + w.c.output = append(w.c.output, line...) + b = b[end:] + } + return +} + +// fmtDuration returns a string representing d in the form "87.00s". +func fmtDuration(d time.Duration) string { + return fmt.Sprintf("%.2fs", d.Seconds()) +} + +// TB is the interface common to T, B, and F. +type TB interface { + Cleanup(func()) + Error(args ...any) + Errorf(format string, args ...any) + Fail() + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Helper() + Log(args ...any) + Logf(format string, args ...any) + Name() string + Setenv(key, value string) + Chdir(dir string) + Skip(args ...any) + SkipNow() + Skipf(format string, args ...any) + Skipped() bool + TempDir() string + Context() context.Context + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate Go 1 compatibility. + private() +} + +var _ TB = (*T)(nil) +var _ TB = (*B)(nil) + +// T is a type passed to Test functions to manage test state and support formatted test logs. 
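+//
+// A minimal test function looks like this (an illustrative sketch, not part
+// of the original documentation):
+//
+//	func TestAdd(t *testing.T) {
+//		if got := 1 + 1; got != 2 {
+//			t.Errorf("1+1 = %d, want 2", got)
+//		}
+//	}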
+// +// A test ends when its Test function returns or calls any of the methods +// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods, as well as +// the Parallel method, must be called only from the goroutine running the +// Test function. +// +// The other reporting methods, such as the variations of Log and Error, +// may be called simultaneously from multiple goroutines. +type T struct { + common + denyParallel bool + tstate *testState // For running tests and subtests. +} + +func (c *common) private() {} + +// Name returns the name of the running (sub-) test or benchmark. +// +// The name will include the name of the test along with the names of +// any nested sub-tests. If two sibling sub-tests have the same name, +// Name will append a suffix to guarantee the returned name is unique. +func (c *common) Name() string { + return c.name +} + +func (c *common) setRan() { + if c.parent != nil { + c.parent.setRan() + } + c.mu.Lock() + defer c.mu.Unlock() + c.ran = true +} + +// Fail marks the function as having failed but continues execution. +func (c *common) Fail() { + if c.parent != nil { + c.parent.Fail() + } + c.mu.Lock() + defer c.mu.Unlock() + // c.done needs to be locked to synchronize checks to c.done in parent tests. + if c.done { + panic("Fail in goroutine after " + c.name + " has completed") + } + c.failed = true +} + +// Failed reports whether the function has failed. +func (c *common) Failed() bool { + c.mu.RLock() + defer c.mu.RUnlock() + + if !c.done && int64(race.Errors()) > c.lastRaceErrors.Load() { + c.mu.RUnlock() + c.checkRaces() + c.mu.RLock() + } + + return c.failed +} + +// FailNow marks the function as having failed and stops its execution +// by calling runtime.Goexit (which then runs all deferred calls in the +// current goroutine). +// Execution will continue at the next test or benchmark. +// FailNow must be called from the goroutine running the +// test or benchmark function, not from other goroutines +// created during the test. Calling FailNow does not stop +// those other goroutines. +func (c *common) FailNow() { + c.checkFuzzFn("FailNow") + c.Fail() + + // Calling runtime.Goexit will exit the goroutine, which + // will run the deferred functions in this goroutine, + // which will eventually run the deferred lines in tRunner, + // which will signal to the test loop that this test is done. + // + // A previous version of this code said: + // + // c.duration = ... + // c.signal <- c.self + // runtime.Goexit() + // + // This previous version duplicated code (those lines are in + // tRunner no matter what), but worse the goroutine teardown + // implicit in runtime.Goexit was not guaranteed to complete + // before the test exited. If a test deferred an important cleanup + // function (like removing temporary files), there was no guarantee + // it would run on a test failure. Because we send on c.signal during + // a top-of-stack deferred function now, we know that the send + // only happens after any other stacked defers have completed. + c.mu.Lock() + c.finished = true + c.mu.Unlock() + runtime.Goexit() +} + +// log generates the output. It's always at the same stack depth. +func (c *common) log(s string) { + c.logDepth(s, 3) // logDepth + log + public function +} + +// logDepth generates the output at an arbitrary stack depth. +func (c *common) logDepth(s string, depth int) { + c.mu.Lock() + defer c.mu.Unlock() + if c.done { + // This test has already finished. Try and log this message + // with our parent. If we don't have a parent, panic. 
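+		// Walk up the chain of parents and attach the message to the
+		// nearest one that has not completed yet.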
+ for parent := c.parent; parent != nil; parent = parent.parent { + parent.mu.Lock() + defer parent.mu.Unlock() + if !parent.done { + parent.output = append(parent.output, parent.decorate(s, depth+1)...) + return + } + } + panic("Log in goroutine after " + c.name + " has completed: " + s) + } else { + if c.chatty != nil { + if c.bench { + // Benchmarks don't print === CONT, so we should skip the test + // printer and just print straight to stdout. + fmt.Print(c.decorate(s, depth+1)) + } else { + c.chatty.Printf(c.name, "%s", c.decorate(s, depth+1)) + } + + return + } + c.output = append(c.output, c.decorate(s, depth+1)...) + } +} + +// Log formats its arguments using default formatting, analogous to Println, +// and records the text in the error log. For tests, the text will be printed only if +// the test fails or the -test.v flag is set. For benchmarks, the text is always +// printed to avoid having performance depend on the value of the -test.v flag. +func (c *common) Log(args ...any) { + c.checkFuzzFn("Log") + c.log(fmt.Sprintln(args...)) +} + +// Logf formats its arguments according to the format, analogous to Printf, and +// records the text in the error log. A final newline is added if not provided. For +// tests, the text will be printed only if the test fails or the -test.v flag is +// set. For benchmarks, the text is always printed to avoid having performance +// depend on the value of the -test.v flag. +func (c *common) Logf(format string, args ...any) { + c.checkFuzzFn("Logf") + c.log(fmt.Sprintf(format, args...)) +} + +// Error is equivalent to Log followed by Fail. +func (c *common) Error(args ...any) { + c.checkFuzzFn("Error") + c.log(fmt.Sprintln(args...)) + c.Fail() +} + +// Errorf is equivalent to Logf followed by Fail. +func (c *common) Errorf(format string, args ...any) { + c.checkFuzzFn("Errorf") + c.log(fmt.Sprintf(format, args...)) + c.Fail() +} + +// Fatal is equivalent to Log followed by FailNow. +func (c *common) Fatal(args ...any) { + c.checkFuzzFn("Fatal") + c.log(fmt.Sprintln(args...)) + c.FailNow() +} + +// Fatalf is equivalent to Logf followed by FailNow. +func (c *common) Fatalf(format string, args ...any) { + c.checkFuzzFn("Fatalf") + c.log(fmt.Sprintf(format, args...)) + c.FailNow() +} + +// Skip is equivalent to Log followed by SkipNow. +func (c *common) Skip(args ...any) { + c.checkFuzzFn("Skip") + c.log(fmt.Sprintln(args...)) + c.SkipNow() +} + +// Skipf is equivalent to Logf followed by SkipNow. +func (c *common) Skipf(format string, args ...any) { + c.checkFuzzFn("Skipf") + c.log(fmt.Sprintf(format, args...)) + c.SkipNow() +} + +// SkipNow marks the test as having been skipped and stops its execution +// by calling [runtime.Goexit]. +// If a test fails (see Error, Errorf, Fail) and is then skipped, +// it is still considered to have failed. +// Execution will continue at the next test or benchmark. See also FailNow. +// SkipNow must be called from the goroutine running the test, not from +// other goroutines created during the test. Calling SkipNow does not stop +// those other goroutines. +func (c *common) SkipNow() { + c.checkFuzzFn("SkipNow") + c.mu.Lock() + c.skipped = true + c.finished = true + c.mu.Unlock() + runtime.Goexit() +} + +// Skipped reports whether the test was skipped. +func (c *common) Skipped() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.skipped +} + +// Helper marks the calling function as a test helper function. +// When printing file and line information, that function will be skipped. 
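+// For example, an assertion helper can call Helper so that failures are
+// attributed to its caller (an illustrative sketch, not part of the original
+// documentation):
+//
+//	func assertEqual(t *testing.T, got, want int) {
+//		t.Helper() // failures report the caller's file:line
+//		if got != want {
+//			t.Errorf("got %d, want %d", got, want)
+//		}
+//	}
+//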
+// Helper may be called simultaneously from multiple goroutines. +func (c *common) Helper() { + c.mu.Lock() + defer c.mu.Unlock() + if c.helperPCs == nil { + c.helperPCs = make(map[uintptr]struct{}) + } + // repeating code from callerName here to save walking a stack frame + var pc [1]uintptr + n := runtime.Callers(2, pc[:]) // skip runtime.Callers + Helper + if n == 0 { + panic("testing: zero callers found") + } + if _, found := c.helperPCs[pc[0]]; !found { + c.helperPCs[pc[0]] = struct{}{} + c.helperNames = nil // map will be recreated next time it is needed + } +} + +// Cleanup registers a function to be called when the test (or subtest) and all its +// subtests complete. Cleanup functions will be called in last added, +// first called order. +func (c *common) Cleanup(f func()) { + c.checkFuzzFn("Cleanup") + var pc [maxStackLen]uintptr + // Skip two extra frames to account for this function and runtime.Callers itself. + n := runtime.Callers(2, pc[:]) + cleanupPc := pc[:n] + + fn := func() { + defer func() { + c.mu.Lock() + defer c.mu.Unlock() + c.cleanupName = "" + c.cleanupPc = nil + }() + + name := callerName(0) + c.mu.Lock() + c.cleanupName = name + c.cleanupPc = cleanupPc + c.mu.Unlock() + + f() + } + + c.mu.Lock() + defer c.mu.Unlock() + c.cleanups = append(c.cleanups, fn) +} + +// TempDir returns a temporary directory for the test to use. +// The directory is automatically removed when the test and +// all its subtests complete. +// Each subsequent call to t.TempDir returns a unique directory; +// if the directory creation fails, TempDir terminates the test by calling Fatal. +func (c *common) TempDir() string { + c.checkFuzzFn("TempDir") + // Use a single parent directory for all the temporary directories + // created by a test, each numbered sequentially. + c.tempDirMu.Lock() + var nonExistent bool + if c.tempDir == "" { // Usually the case with js/wasm + nonExistent = true + } else { + _, err := os.Stat(c.tempDir) + nonExistent = os.IsNotExist(err) + if err != nil && !nonExistent { + c.Fatalf("TempDir: %v", err) + } + } + + if nonExistent { + c.Helper() + + // Drop unusual characters (such as path separators or + // characters interacting with globs) from the directory name to + // avoid surprising os.MkdirTemp behavior. + mapper := func(r rune) rune { + if r < utf8.RuneSelf { + const allowed = "!#$%&()+,-.=@^_{}~ " + if '0' <= r && r <= '9' || + 'a' <= r && r <= 'z' || + 'A' <= r && r <= 'Z' { + return r + } + if strings.ContainsRune(allowed, r) { + return r + } + } else if unicode.IsLetter(r) || unicode.IsNumber(r) { + return r + } + return -1 + } + pattern := strings.Map(mapper, c.Name()) + c.tempDir, c.tempDirErr = os.MkdirTemp("", pattern) + if c.tempDirErr == nil { + c.Cleanup(func() { + if err := removeAll(c.tempDir); err != nil { + c.Errorf("TempDir RemoveAll cleanup: %v", err) + } + }) + } + } + + if c.tempDirErr == nil { + c.tempDirSeq++ + } + seq := c.tempDirSeq + c.tempDirMu.Unlock() + + if c.tempDirErr != nil { + c.Fatalf("TempDir: %v", c.tempDirErr) + } + + dir := fmt.Sprintf("%s%c%03d", c.tempDir, os.PathSeparator, seq) + if err := os.Mkdir(dir, 0777); err != nil { + c.Fatalf("TempDir: %v", err) + } + return dir +} + +// removeAll is like os.RemoveAll, but retries Windows "Access is denied." +// errors up to an arbitrary timeout. 
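+// Retries use randomized, roughly exponential backoff starting at 1ms,
+// bounded by a 2-second overall deadline.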
+// +// Those errors have been known to occur spuriously on at least the +// windows-amd64-2012 builder (https://go.dev/issue/50051), and can only occur +// legitimately if the test leaves behind a temp file that either is still open +// or the test otherwise lacks permission to delete. In the case of legitimate +// failures, a failing test may take a bit longer to fail, but once the test is +// fixed the extra latency will go away. +func removeAll(path string) error { + const arbitraryTimeout = 2 * time.Second + var ( + start time.Time + nextSleep = 1 * time.Millisecond + ) + for { + err := os.RemoveAll(path) + if !isWindowsRetryable(err) { + return err + } + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + return err + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } +} + +// Setenv calls os.Setenv(key, value) and uses Cleanup to +// restore the environment variable to its original value +// after the test. +// +// Because Setenv affects the whole process, it cannot be used +// in parallel tests or tests with parallel ancestors. +func (c *common) Setenv(key, value string) { + c.checkFuzzFn("Setenv") + prevValue, ok := os.LookupEnv(key) + + if err := os.Setenv(key, value); err != nil { + c.Fatalf("cannot set environment variable: %v", err) + } + + if ok { + c.Cleanup(func() { + os.Setenv(key, prevValue) + }) + } else { + c.Cleanup(func() { + os.Unsetenv(key) + }) + } +} + +// Chdir calls os.Chdir(dir) and uses Cleanup to restore the current +// working directory to its original value after the test. On Unix, it +// also sets PWD environment variable for the duration of the test. +// +// Because Chdir affects the whole process, it cannot be used +// in parallel tests or tests with parallel ancestors. +func (c *common) Chdir(dir string) { + c.checkFuzzFn("Chdir") + oldwd, err := os.Open(".") + if err != nil { + c.Fatal(err) + } + if err := os.Chdir(dir); err != nil { + c.Fatal(err) + } + // On POSIX platforms, PWD represents “an absolute pathname of the + // current working directory.” Since we are changing the working + // directory, we should also set or update PWD to reflect that. + switch runtime.GOOS { + case "windows", "plan9": + // Windows and Plan 9 do not use the PWD variable. + default: + if !filepath.IsAbs(dir) { + dir, err = os.Getwd() + if err != nil { + c.Fatal(err) + } + } + c.Setenv("PWD", dir) + } + c.Cleanup(func() { + err := oldwd.Chdir() + oldwd.Close() + if err != nil { + // It's not safe to continue with tests if we can't + // get back to the original working directory. Since + // we are holding a dirfd, this is highly unlikely. + panic("testing.Chdir: " + err.Error()) + } + }) +} + +// Context returns a context that is canceled just before +// Cleanup-registered functions are called. +// +// Cleanup functions can wait for any resources +// that shut down on Context.Done before the test or benchmark completes. +func (c *common) Context() context.Context { + c.checkFuzzFn("Context") + return c.ctx +} + +// panicHandling controls the panic handling used by runCleanup. +type panicHandling int + +const ( + normalPanic panicHandling = iota + recoverAndReturnPanic +) + +// runCleanup is called at the end of the test. +// If ph is recoverAndReturnPanic, it will catch panics, and return the +// recovered value if any. 
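+//
+// Cleanup functions run in last-added, first-called order; if one of them
+// panics, the remaining functions still run before the panic is surfaced.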
+func (c *common) runCleanup(ph panicHandling) (panicVal any) { + c.cleanupStarted.Store(true) + defer c.cleanupStarted.Store(false) + + if ph == recoverAndReturnPanic { + defer func() { + panicVal = recover() + }() + } + + // Make sure that if a cleanup function panics, + // we still run the remaining cleanup functions. + defer func() { + c.mu.Lock() + recur := len(c.cleanups) > 0 + c.mu.Unlock() + if recur { + c.runCleanup(normalPanic) + } + }() + + if c.cancelCtx != nil { + c.cancelCtx() + } + + for { + var cleanup func() + c.mu.Lock() + if len(c.cleanups) > 0 { + last := len(c.cleanups) - 1 + cleanup = c.cleanups[last] + c.cleanups = c.cleanups[:last] + } + c.mu.Unlock() + if cleanup == nil { + return nil + } + cleanup() + } +} + +// resetRaces updates c.parent's count of data race errors (or the global count, +// if c has no parent), and updates c.lastRaceErrors to match. +// +// Any races that occurred prior to this call to resetRaces will +// not be attributed to c. +func (c *common) resetRaces() { + if c.parent == nil { + c.lastRaceErrors.Store(int64(race.Errors())) + } else { + c.lastRaceErrors.Store(c.parent.checkRaces()) + } +} + +// checkRaces checks whether the global count of data race errors has increased +// since c's count was last reset. +// +// If so, it marks c as having failed due to those races (logging an error for +// the first such race), and updates the race counts for the parents of c so +// that if they are currently suspended (such as in a call to T.Run) they will +// not log separate errors for the race(s). +// +// Note that multiple tests may be marked as failed due to the same race if they +// are executing in parallel. +func (c *common) checkRaces() (raceErrors int64) { + raceErrors = int64(race.Errors()) + for { + last := c.lastRaceErrors.Load() + if raceErrors <= last { + // All races have already been reported. + return raceErrors + } + if c.lastRaceErrors.CompareAndSwap(last, raceErrors) { + break + } + } + + if c.raceErrorLogged.CompareAndSwap(false, true) { + // This is the first race we've encountered for this test. + // Mark the test as failed, and log the reason why only once. + // (Note that the race detector itself will still write a goroutine + // dump for any further races it detects.) + c.Errorf("race detected during execution of test") + } + + // Update the parent(s) of this test so that they don't re-report the race. + parent := c.parent + for parent != nil { + for { + last := parent.lastRaceErrors.Load() + if raceErrors <= last { + // This race was already reported by another (likely parallel) subtest. + return raceErrors + } + if parent.lastRaceErrors.CompareAndSwap(last, raceErrors) { + break + } + } + parent = parent.parent + } + + return raceErrors +} + +// callerName gives the function name (qualified with a package path) +// for the caller after skip frames (where 0 means the current function). +func callerName(skip int) string { + var pc [1]uintptr + n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName + if n == 0 { + panic("testing: zero callers found") + } + return pcToName(pc[0]) +} + +func pcToName(pc uintptr) string { + pcs := []uintptr{pc} + frames := runtime.CallersFrames(pcs) + frame, _ := frames.Next() + return frame.Function +} + +const parallelConflict = `testing: test using t.Setenv or t.Chdir can not use t.Parallel` + +// Parallel signals that this test is to be run in parallel with (and only with) +// other parallel tests. 
When a test is run multiple times due to use of
+// -test.count or -test.cpu, multiple instances of a single test never run in
+// parallel with each other.
+func (t *T) Parallel() {
+	if t.isParallel {
+		panic("testing: t.Parallel called multiple times")
+	}
+	if t.denyParallel {
+		panic(parallelConflict)
+	}
+	t.isParallel = true
+	if t.parent.barrier == nil {
+		// T.Parallel has no effect when fuzzing.
+		// Multiple processes may run in parallel, but only one input can run at a
+		// time per process so we can attribute crashes to specific inputs.
+		return
+	}
+
+	// We don't want to include the time we spend waiting for serial tests
+	// in the test duration. Record the elapsed time thus far and reset the
+	// timer afterwards.
+	t.duration += highPrecisionTimeSince(t.start)
+
+	// Add to the list of tests to be released by the parent.
+	t.parent.sub = append(t.parent.sub, t)
+
+	// Report any races during execution of this test up to this point.
+	//
+	// We will assume that any races that occur between here and the point where
+	// we unblock are not caused by this subtest. That assumption usually holds,
+	// although it can be wrong if the test spawns a goroutine that races in the
+	// background while the rest of the test is blocked on the call to Parallel.
+	// If that happens, we will misattribute the background race to some other
+	// test, or to no test at all — but that false-negative is so unlikely that it
+	// is not worth adding race-report noise for the common case where the test is
+	// completely suspended during the call to Parallel.
+	t.checkRaces()
+
+	if t.chatty != nil {
+		t.chatty.Updatef(t.name, "=== PAUSE %s\n", t.name)
+	}
+	running.Delete(t.name)
+
+	t.signal <- true   // Release calling test.
+	<-t.parent.barrier // Wait for the parent test to complete.
+	t.tstate.waitParallel()
+
+	if t.chatty != nil {
+		t.chatty.Updatef(t.name, "=== CONT  %s\n", t.name)
+	}
+	running.Store(t.name, highPrecisionTimeNow())
+	t.start = highPrecisionTimeNow()
+
+	// Reset the local race counter to ignore any races that happened while this
+	// goroutine was blocked, such as in the parent test or in other parallel
+	// subtests.
+	//
+	// (Note that we don't call parent.checkRaces here:
+	// if other parallel subtests have already introduced races, we want to
+	// let them report those races instead of attributing them to the parent.)
+	t.lastRaceErrors.Store(int64(race.Errors()))
+}
+
+func (t *T) checkParallel() {
+	// Non-parallel subtests that have parallel ancestors may still
+	// run in parallel with other tests: they are only non-parallel
+	// with respect to the other subtests of the same parent.
+	// Since calls like Setenv or Chdir affect the whole process, we need
+	// to deny those if the current test or any parent is parallel.
+	for c := &t.common; c != nil; c = c.parent {
+		if c.isParallel {
+			panic(parallelConflict)
+		}
+	}
+
+	t.denyParallel = true
+}
+
+// Setenv calls os.Setenv(key, value) and uses Cleanup to
+// restore the environment variable to its original value
+// after the test.
+//
+// Because Setenv affects the whole process, it cannot be used
+// in parallel tests or tests with parallel ancestors.
+func (t *T) Setenv(key, value string) {
+	t.checkParallel()
+	t.common.Setenv(key, value)
+}
+
+// Chdir calls os.Chdir(dir) and uses Cleanup to restore the current
+// working directory to its original value after the test. On Unix, it
+// also sets PWD environment variable for the duration of the test.
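+// A minimal usage sketch (illustrative only):
+//
+//	t.Chdir(t.TempDir()) // cwd and PWD are restored when the test ends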
+// +// Because Chdir affects the whole process, it cannot be used +// in parallel tests or tests with parallel ancestors. +func (t *T) Chdir(dir string) { + t.checkParallel() + t.common.Chdir(dir) +} + +// InternalTest is an internal type but exported because it is cross-package; +// it is part of the implementation of the "go test" command. +type InternalTest struct { + Name string + F func(*T) +} + +var errNilPanicOrGoexit = errors.New("test executed panic(nil) or runtime.Goexit") + +func tRunner(t *T, fn func(t *T)) { + t.runner = callerName(0) + + // When this goroutine is done, either because fn(t) + // returned normally or because a test failure triggered + // a call to runtime.Goexit, record the duration and send + // a signal saying that the test is done. + defer func() { + t.checkRaces() + + // TODO(#61034): This is the wrong place for this check. + if t.Failed() { + numFailed.Add(1) + } + + // Check if the test panicked or Goexited inappropriately. + // + // If this happens in a normal test, print output but continue panicking. + // tRunner is called in its own goroutine, so this terminates the process. + // + // If this happens while fuzzing, recover from the panic and treat it like a + // normal failure. It's important that the process keeps running in order to + // find short inputs that cause panics. + err := recover() + signal := true + + t.mu.RLock() + finished := t.finished + t.mu.RUnlock() + if !finished && err == nil { + err = errNilPanicOrGoexit + for p := t.parent; p != nil; p = p.parent { + p.mu.RLock() + finished = p.finished + p.mu.RUnlock() + if finished { + if !t.isParallel { + t.Errorf("%v: subtest may have called FailNow on a parent test", err) + err = nil + } + signal = false + break + } + } + } + + if err != nil && t.tstate.isFuzzing { + prefix := "panic: " + if err == errNilPanicOrGoexit { + prefix = "" + } + t.Errorf("%s%s\n%s\n", prefix, err, string(debug.Stack())) + t.mu.Lock() + t.finished = true + t.mu.Unlock() + err = nil + } + + // Use a deferred call to ensure that we report that the test is + // complete even if a cleanup function calls t.FailNow. See issue 41355. + didPanic := false + defer func() { + // Only report that the test is complete if it doesn't panic, + // as otherwise the test binary can exit before the panic is + // reported to the user. See issue 41479. + if didPanic { + return + } + if err != nil { + panic(err) + } + running.Delete(t.name) + t.signal <- signal + }() + + doPanic := func(err any) { + t.Fail() + if r := t.runCleanup(recoverAndReturnPanic); r != nil { + t.Logf("cleanup panicked with %v", r) + } + // Flush the output log up to the root before dying. + for root := &t.common; root.parent != nil; root = root.parent { + root.mu.Lock() + root.duration += highPrecisionTimeSince(root.start) + d := root.duration + root.mu.Unlock() + root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d)) + if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil { + fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r) + } + } + didPanic = true + panic(err) + } + if err != nil { + doPanic(err) + } + + t.duration += highPrecisionTimeSince(t.start) + + if len(t.sub) > 0 { + // Run parallel subtests. + + // Decrease the running count for this test and mark it as no longer running. + t.tstate.release() + running.Delete(t.name) + + // Release the parallel subtests. + close(t.barrier) + // Wait for subtests to complete. 
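+		// Each subtest's own tRunner sends on its signal channel when the
+		// subtest finishes.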
+ for _, sub := range t.sub { + <-sub.signal + } + + // Run any cleanup callbacks, marking the test as running + // in case the cleanup hangs. + cleanupStart := highPrecisionTimeNow() + running.Store(t.name, cleanupStart) + err := t.runCleanup(recoverAndReturnPanic) + t.duration += highPrecisionTimeSince(cleanupStart) + if err != nil { + doPanic(err) + } + t.checkRaces() + if !t.isParallel { + // Reacquire the count for sequential tests. See comment in Run. + t.tstate.waitParallel() + } + } else if t.isParallel { + // Only release the count for this test if it was run as a parallel + // test. See comment in Run method. + t.tstate.release() + } + t.report() // Report after all subtests have finished. + + // Do not lock t.done to allow race detector to detect race in case + // the user does not appropriately synchronize a goroutine. + t.done = true + if t.parent != nil && !t.hasSub.Load() { + t.setRan() + } + }() + defer func() { + if len(t.sub) == 0 { + t.runCleanup(normalPanic) + } + }() + + t.start = highPrecisionTimeNow() + t.resetRaces() + fn(t) + + // code beyond here will not be executed when FailNow is invoked + t.mu.Lock() + t.finished = true + t.mu.Unlock() +} + +// Run runs f as a subtest of t called name. It runs f in a separate goroutine +// and blocks until f returns or calls t.Parallel to become a parallel test. +// Run reports whether f succeeded (or at least did not fail before calling t.Parallel). +// +// Run may be called simultaneously from multiple goroutines, but all such calls +// must return before the outer test function for t returns. +func (t *T) Run(name string, f func(t *T)) bool { + if t.cleanupStarted.Load() { + panic("testing: t.Run called during t.Cleanup") + } + + t.hasSub.Store(true) + testName, ok, _ := t.tstate.match.fullName(&t.common, name) + if !ok || shouldFailFast() { + return true + } + // Record the stack trace at the point of this call so that if the subtest + // function - which runs in a separate stack - is marked as a helper, we can + // continue walking the stack into the parent test. + var pc [maxStackLen]uintptr + n := runtime.Callers(2, pc[:]) + + // There's no reason to inherit this context from parent. The user's code can't observe + // the difference between the background context and the one from the parent test. + ctx, cancelCtx := context.WithCancel(context.Background()) + t = &T{ + common: common{ + barrier: make(chan bool), + signal: make(chan bool, 1), + name: testName, + parent: &t.common, + level: t.level + 1, + creator: pc[:n], + chatty: t.chatty, + ctx: ctx, + cancelCtx: cancelCtx, + }, + tstate: t.tstate, + } + t.w = indenter{&t.common} + + if t.chatty != nil { + t.chatty.Updatef(t.name, "=== RUN %s\n", t.name) + } + running.Store(t.name, highPrecisionTimeNow()) + + // Instead of reducing the running count of this test before calling the + // tRunner and increasing it afterwards, we rely on tRunner keeping the + // count correct. This ensures that a sequence of sequential tests runs + // without being preempted, even when their parent is a parallel test. This + // may especially reduce surprises if *parallel == 1. + go tRunner(t, f) + + // The parent goroutine will block until the subtest either finishes or calls + // Parallel, but in general we don't know whether the parent goroutine is the + // top-level test function or some other goroutine it has spawned. + // To avoid confusing false-negatives, we leave the parent in the running map + // even though in the typical case it is blocked. 
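+	//
+	// Block until the subtest either finishes or calls t.Parallel; both
+	// paths send on t.signal.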
+
+	if !<-t.signal {
+		// At this point, it is likely that FailNow was called on one of the
+		// parent tests by one of the subtests. Continue aborting up the chain.
+		runtime.Goexit()
+	}
+
+	if t.chatty != nil && t.chatty.json {
+		t.chatty.Updatef(t.parent.name, "=== NAME  %s\n", t.parent.name)
+	}
+	return !t.failed
+}
+
+// Deadline reports the time at which the test binary will have
+// exceeded the timeout specified by the -timeout flag.
+//
+// The ok result is false if the -timeout flag indicates “no timeout” (0).
+func (t *T) Deadline() (deadline time.Time, ok bool) {
+	deadline = t.tstate.deadline
+	return deadline, !deadline.IsZero()
+}
+
+// testState holds all fields that are common to all tests. This includes
+// synchronization primitives to run at most *parallel tests.
+type testState struct {
+	match    *matcher
+	deadline time.Time
+
+	// isFuzzing is true in the state used when generating random inputs
+	// for fuzz targets. isFuzzing is false when running normal tests and
+	// when running fuzz tests as unit tests (without -fuzz or when -fuzz
+	// does not match).
+	isFuzzing bool
+
+	mu sync.Mutex
+
+	// Channel used to signal tests that are ready to be run in parallel.
+	startParallel chan bool
+
+	// running is the number of tests currently running in parallel.
+	// This does not include tests that are waiting for subtests to complete.
+	running int
+
+	// numWaiting is the number of tests waiting to be run in parallel.
+	numWaiting int
+
+	// maxParallel is a copy of the parallel flag.
+	maxParallel int
+}
+
+func newTestState(maxParallel int, m *matcher) *testState {
+	return &testState{
+		match:         m,
+		startParallel: make(chan bool),
+		maxParallel:   maxParallel,
+		running:       1, // Set the count to 1 for the main (sequential) test.
+	}
+}
+
+func (s *testState) waitParallel() {
+	s.mu.Lock()
+	if s.running < s.maxParallel {
+		s.running++
+		s.mu.Unlock()
+		return
+	}
+	s.numWaiting++
+	s.mu.Unlock()
+	<-s.startParallel
+}
+
+func (s *testState) release() {
+	s.mu.Lock()
+	if s.numWaiting == 0 {
+		s.running--
+		s.mu.Unlock()
+		return
+	}
+	s.numWaiting--
+	s.mu.Unlock()
+	s.startParallel <- true // Pick a waiting test to be run.
+}
+
+// No one should be using func Main anymore.
+// See the doc comment on func Main and use MainStart instead.
+var errMain = errors.New("testing: unexpected use of func Main")
+
+type matchStringOnly func(pat, str string) (bool, error)
+
+func (f matchStringOnly) MatchString(pat, str string) (bool, error)   { return f(pat, str) }
+func (f matchStringOnly) StartCPUProfile(w io.Writer) error           { return errMain }
+func (f matchStringOnly) StopCPUProfile()                             {}
+func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain }
+func (f matchStringOnly) ImportPath() string                          { return "" }
+func (f matchStringOnly) StartTestLog(io.Writer)                      {}
+func (f matchStringOnly) StopTestLog() error                          { return errMain }
+func (f matchStringOnly) SetPanicOnExit0(bool)                        {}
+func (f matchStringOnly) CoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error {
+	return errMain
+}
+func (f matchStringOnly) RunFuzzWorker(func(corpusEntry) error) error { return errMain }
+func (f matchStringOnly) ReadCorpus(string, []reflect.Type) ([]corpusEntry, error) {
+	return nil, errMain
+}
+func (f matchStringOnly) CheckCorpus([]any, []reflect.Type) error { return nil }
+func (f matchStringOnly) ResetCoverage()                          {}
+func (f matchStringOnly) SnapshotCoverage()                       {}
+
+func (f matchStringOnly) InitRuntimeCoverage() (mode string, tearDown func(string, string) (string, error), snapcov func() float64) {
+	return
+}
+
+// Main is an internal function, part of the implementation of the "go test" command.
+// It was exported because it is cross-package and predates "internal" packages.
+// It is no longer used by "go test" but is preserved, as much as possible, for
+// other systems that simulate "go test" using Main; however, Main sometimes
+// cannot be updated as new functionality is added to the testing package.
+// Systems simulating "go test" should be updated to use MainStart.
+func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+	os.Exit(MainStart(matchStringOnly(matchString), tests, benchmarks, nil, examples).Run())
+}
+
+// M is a type passed to a TestMain function to run the actual tests.
+type M struct {
+	deps        testDeps
+	tests       []InternalTest
+	benchmarks  []InternalBenchmark
+	fuzzTargets []InternalFuzzTarget
+	examples    []InternalExample
+
+	timer     *time.Timer
+	afterOnce sync.Once
+
+	numRun int
+
+	// exitCode is the value to pass to os.Exit; the outer test func main
+	// harness calls os.Exit with this code. See #34129.
+	exitCode int
+}
+
+// testDeps is an internal interface of functionality that is
+// passed into this package by a test's generated main package.
+// The canonical implementation of this interface is
+// testing/internal/testdeps's TestDeps.
+type testDeps interface {
+	ImportPath() string
+	MatchString(pat, str string) (bool, error)
+	SetPanicOnExit0(bool)
+	StartCPUProfile(io.Writer) error
+	StopCPUProfile()
+	StartTestLog(io.Writer)
+	StopTestLog() error
+	WriteProfileTo(string, io.Writer, int) error
+	CoordinateFuzzing(time.Duration, int64, time.Duration, int64, int, []corpusEntry, []reflect.Type, string, string) error
+	RunFuzzWorker(func(corpusEntry) error) error
+	ReadCorpus(string, []reflect.Type) ([]corpusEntry, error)
+	CheckCorpus([]any, []reflect.Type) error
+	ResetCoverage()
+	SnapshotCoverage()
+	InitRuntimeCoverage() (mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64)
+}
+
+// MainStart is meant for use by tests generated by 'go test'.
+// It is not meant to be called directly and is not subject to the Go 1 compatibility document. +// It may change signature from release to release. +func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M { + registerCover2(deps.InitRuntimeCoverage()) + Init() + return &M{ + deps: deps, + tests: tests, + benchmarks: benchmarks, + fuzzTargets: fuzzTargets, + examples: examples, + } +} + +var testingTesting bool +var realStderr *os.File + +// Run runs the tests. It returns an exit code to pass to os.Exit. +func (m *M) Run() (code int) { + defer func() { + code = m.exitCode + }() + + // Count the number of calls to m.Run. + // We only ever expected 1, but we didn't enforce that, + // and now there are tests in the wild that call m.Run multiple times. + // Sigh. go.dev/issue/23129. + m.numRun++ + + // TestMain may have already called flag.Parse. + if !flag.Parsed() { + flag.Parse() + } + + if chatty.json { + // With -v=json, stdout and stderr are pointing to the same pipe, + // which is leading into test2json. In general, operating systems + // do a good job of ensuring that writes to the same pipe through + // different file descriptors are delivered whole, so that writing + // AAA to stdout and BBB to stderr simultaneously produces + // AAABBB or BBBAAA on the pipe, not something like AABBBA. + // However, the exception to this is when the pipe fills: in that + // case, Go's use of non-blocking I/O means that writing AAA + // or BBB might be split across multiple system calls, making it + // entirely possible to get output like AABBBA. The same problem + // happens inside the operating system kernel if we switch to + // blocking I/O on the pipe. This interleaved output can do things + // like print unrelated messages in the middle of a TestFoo line, + // which confuses test2json. Setting os.Stderr = os.Stdout will make + // them share a single pfd, which will hold a lock for each program + // write, preventing any interleaving. + // + // It might be nice to set Stderr = Stdout always, or perhaps if + // we can tell they are the same file, but for now -v=json is + // a very clear signal. Making the two files the same may cause + // surprises if programs close os.Stdout but expect to be able + // to continue to write to os.Stderr, but it's hard to see why a + // test would think it could take over global state that way. + // + // This fix only helps programs where the output is coming directly + // from Go code. It does not help programs in which a subprocess is + // writing to stderr or stdout at the same time that a Go test is writing output. + // It also does not help when the output is coming from the runtime, + // such as when using the print/println functions, since that code writes + // directly to fd 2 without any locking. + // We keep realStderr around to prevent fd 2 from being closed. + // + // See go.dev/issue/33419. 
+ realStderr = os.Stderr + os.Stderr = os.Stdout + } + + if *parallel < 1 { + fmt.Fprintln(os.Stderr, "testing: -parallel can only be given a positive integer") + flag.Usage() + m.exitCode = 2 + return + } + if *matchFuzz != "" && *fuzzCacheDir == "" { + fmt.Fprintln(os.Stderr, "testing: -test.fuzzcachedir must be set if -test.fuzz is set") + flag.Usage() + m.exitCode = 2 + return + } + + if *matchList != "" { + listTests(m.deps.MatchString, m.tests, m.benchmarks, m.fuzzTargets, m.examples) + m.exitCode = 0 + return + } + + if *shuffle != "off" { + var n int64 + var err error + if *shuffle == "on" { + n = time.Now().UnixNano() + } else { + n, err = strconv.ParseInt(*shuffle, 10, 64) + if err != nil { + fmt.Fprintln(os.Stderr, `testing: -shuffle should be "off", "on", or a valid integer:`, err) + m.exitCode = 2 + return + } + } + fmt.Println("-test.shuffle", n) + rng := rand.New(rand.NewSource(n)) + rng.Shuffle(len(m.tests), func(i, j int) { m.tests[i], m.tests[j] = m.tests[j], m.tests[i] }) + rng.Shuffle(len(m.benchmarks), func(i, j int) { m.benchmarks[i], m.benchmarks[j] = m.benchmarks[j], m.benchmarks[i] }) + } + + parseCpuList() + + m.before() + defer m.after() + + // Run tests, examples, and benchmarks unless this is a fuzz worker process. + // Workers start after this is done by their parent process, and they should + // not repeat this work. + if !*isFuzzWorker { + deadline := m.startAlarm() + haveExamples = len(m.examples) > 0 + testRan, testOk := runTests(m.deps.MatchString, m.tests, deadline) + fuzzTargetsRan, fuzzTargetsOk := runFuzzTests(m.deps, m.fuzzTargets, deadline) + exampleRan, exampleOk := runExamples(m.deps.MatchString, m.examples) + m.stopAlarm() + if !testRan && !exampleRan && !fuzzTargetsRan && *matchBenchmarks == "" && *matchFuzz == "" { + fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") + if testingTesting && *match != "^$" { + // If this happens during testing of package testing it could be that + // package testing's own logic for when to run a test is broken, + // in which case every test will run nothing and succeed, + // with no obvious way to detect this problem (since no tests are running). + // So make 'no tests to run' a hard failure when testing package testing itself. 
+ fmt.Print(chatty.prefix(), "FAIL: package testing must run tests\n") + testOk = false + } + } + anyFailed := !testOk || !exampleOk || !fuzzTargetsOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks) + if !anyFailed && race.Errors() > 0 { + fmt.Print(chatty.prefix(), "testing: race detected outside of test execution\n") + anyFailed = true + } + if anyFailed { + fmt.Print(chatty.prefix(), "FAIL\n") + m.exitCode = 1 + return + } + } + + fuzzingOk := runFuzzing(m.deps, m.fuzzTargets) + if !fuzzingOk { + fmt.Print(chatty.prefix(), "FAIL\n") + if *isFuzzWorker { + m.exitCode = fuzzWorkerExitCode + } else { + m.exitCode = 1 + } + return + } + + m.exitCode = 0 + if !*isFuzzWorker { + fmt.Print(chatty.prefix(), "PASS\n") + } + return +} + +func (t *T) report() { + if t.parent == nil { + return + } + dstr := fmtDuration(t.duration) + format := "--- %s: %s (%s)\n" + if t.Failed() { + t.flushToParent(t.name, format, "FAIL", t.name, dstr) + } else if t.chatty != nil { + if t.Skipped() { + t.flushToParent(t.name, format, "SKIP", t.name, dstr) + } else { + t.flushToParent(t.name, format, "PASS", t.name, dstr) + } + } +} + +func listTests(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) { + if _, err := matchString(*matchList, "non-empty"); err != nil { + fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, err) + os.Exit(1) + } + + for _, test := range tests { + if ok, _ := matchString(*matchList, test.Name); ok { + fmt.Println(test.Name) + } + } + for _, bench := range benchmarks { + if ok, _ := matchString(*matchList, bench.Name); ok { + fmt.Println(bench.Name) + } + } + for _, fuzzTarget := range fuzzTargets { + if ok, _ := matchString(*matchList, fuzzTarget.Name); ok { + fmt.Println(fuzzTarget.Name) + } + } + for _, example := range examples { + if ok, _ := matchString(*matchList, example.Name); ok { + fmt.Println(example.Name) + } + } +} + +// RunTests is an internal function but exported because it is cross-package; +// it is part of the implementation of the "go test" command. +func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) { + var deadline time.Time + if *timeout > 0 { + deadline = time.Now().Add(*timeout) + } + ran, ok := runTests(matchString, tests, deadline) + if !ran && !haveExamples { + fmt.Fprintln(os.Stderr, "testing: warning: no tests to run") + } + return ok +} + +func runTests(matchString func(pat, str string) (bool, error), tests []InternalTest, deadline time.Time) (ran, ok bool) { + ok = true + for _, procs := range cpuList { + runtime.GOMAXPROCS(procs) + for i := uint(0); i < *count; i++ { + if shouldFailFast() { + break + } + if i > 0 && !ran { + // There were no tests to run on the first + // iteration. This won't change, so no reason + // to keep trying. 
+ break + } + ctx, cancelCtx := context.WithCancel(context.Background()) + tstate := newTestState(*parallel, newMatcher(matchString, *match, "-test.run", *skip)) + tstate.deadline = deadline + t := &T{ + common: common{ + signal: make(chan bool, 1), + barrier: make(chan bool), + w: os.Stdout, + ctx: ctx, + cancelCtx: cancelCtx, + }, + tstate: tstate, + } + if Verbose() { + t.chatty = newChattyPrinter(t.w) + } + tRunner(t, func(t *T) { + for _, test := range tests { + t.Run(test.Name, test.F) + } + }) + select { + case <-t.signal: + default: + panic("internal error: tRunner exited without sending on t.signal") + } + ok = ok && !t.Failed() + ran = ran || t.ran + } + } + return ran, ok +} + +// before runs before all testing. +func (m *M) before() { + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + } + if *cpuProfile != "" { + f, err := os.Create(toOutputDir(*cpuProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := m.deps.StartCPUProfile(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) + f.Close() + return + } + // Could save f so after can call f.Close; not worth the effort. + } + if *traceFile != "" { + f, err := os.Create(toOutputDir(*traceFile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + return + } + if err := trace.Start(f); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't start tracing: %s\n", err) + f.Close() + return + } + // Could save f so after can call f.Close; not worth the effort. + } + if *blockProfile != "" && *blockProfileRate >= 0 { + runtime.SetBlockProfileRate(*blockProfileRate) + } + if *mutexProfile != "" && *mutexProfileFraction >= 0 { + runtime.SetMutexProfileFraction(*mutexProfileFraction) + } + if *coverProfile != "" && CoverMode() == "" { + fmt.Fprintf(os.Stderr, "testing: cannot use -test.coverprofile because test binary was not built with coverage enabled\n") + os.Exit(2) + } + if *gocoverdir != "" && CoverMode() == "" { + fmt.Fprintf(os.Stderr, "testing: cannot use -test.gocoverdir because test binary was not built with coverage enabled\n") + os.Exit(2) + } + if *testlog != "" { + // Note: Not using toOutputDir. + // This file is for use by cmd/go, not users. + var f *os.File + var err error + if m.numRun == 1 { + f, err = os.Create(*testlog) + } else { + f, err = os.OpenFile(*testlog, os.O_WRONLY, 0) + if err == nil { + f.Seek(0, io.SeekEnd) + } + } + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + m.deps.StartTestLog(f) + testlogFile = f + } + if *panicOnExit0 { + m.deps.SetPanicOnExit0(true) + } +} + +// after runs after all testing. +func (m *M) after() { + m.afterOnce.Do(func() { + m.writeProfiles() + }) + + // Restore PanicOnExit0 after every run, because we set it to true before + // every run. Otherwise, if m.Run is called multiple times the behavior of + // os.Exit(0) will not be restored after the second run. 
+ if *panicOnExit0 { + m.deps.SetPanicOnExit0(false) + } +} + +func (m *M) writeProfiles() { + if *testlog != "" { + if err := m.deps.StopTestLog(); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err) + os.Exit(2) + } + if err := testlogFile.Close(); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *testlog, err) + os.Exit(2) + } + } + if *cpuProfile != "" { + m.deps.StopCPUProfile() // flushes profile to disk + } + if *traceFile != "" { + trace.Stop() // flushes trace to disk + } + if *memProfile != "" { + f, err := os.Create(toOutputDir(*memProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + runtime.GC() // materialize all statistics + if err = m.deps.WriteProfileTo("allocs", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *memProfile, err) + os.Exit(2) + } + f.Close() + } + if *blockProfile != "" && *blockProfileRate >= 0 { + f, err := os.Create(toOutputDir(*blockProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + if err = m.deps.WriteProfileTo("block", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *blockProfile, err) + os.Exit(2) + } + f.Close() + } + if *mutexProfile != "" && *mutexProfileFraction >= 0 { + f, err := os.Create(toOutputDir(*mutexProfile)) + if err != nil { + fmt.Fprintf(os.Stderr, "testing: %s\n", err) + os.Exit(2) + } + if err = m.deps.WriteProfileTo("mutex", f, 0); err != nil { + fmt.Fprintf(os.Stderr, "testing: can't write %s: %s\n", *mutexProfile, err) + os.Exit(2) + } + f.Close() + } + if CoverMode() != "" { + coverReport() + } +} + +// toOutputDir returns the file name relocated, if required, to outputDir. +// Simple implementation to avoid pulling in path/filepath. +func toOutputDir(path string) string { + if *outputDir == "" || path == "" { + return path + } + // On Windows, it's clumsy, but we can be almost always correct + // by just looking for a drive letter and a colon. + // Absolute paths always have a drive letter (ignoring UNC). + // Problem: if path == "C:A" and outputdir == "C:\Go" it's unclear + // what to do, but even then path/filepath doesn't help. + // TODO: Worth doing better? Probably not, because we're here only + // under the management of go test. + if runtime.GOOS == "windows" && len(path) >= 2 { + letter, colon := path[0], path[1] + if ('a' <= letter && letter <= 'z' || 'A' <= letter && letter <= 'Z') && colon == ':' { + // If path starts with a drive letter we're stuck with it regardless. + return path + } + } + if os.IsPathSeparator(path[0]) { + return path + } + return fmt.Sprintf("%s%c%s", *outputDir, os.PathSeparator, path) +} + +// startAlarm starts an alarm if requested. +func (m *M) startAlarm() time.Time { + if *timeout <= 0 { + return time.Time{} + } + + deadline := time.Now().Add(*timeout) + m.timer = time.AfterFunc(*timeout, func() { + m.after() + debug.SetTraceback("all") + extra := "" + + if list := runningList(); len(list) > 0 { + var b strings.Builder + b.WriteString("\nrunning tests:") + for _, name := range list { + b.WriteString("\n\t") + b.WriteString(name) + } + extra = b.String() + } + panic(fmt.Sprintf("test timed out after %v%s", *timeout, extra)) + }) + return deadline +} + +// runningList returns the list of running tests. 
+func runningList() []string { + var list []string + running.Range(func(k, v any) bool { + list = append(list, fmt.Sprintf("%s (%v)", k.(string), highPrecisionTimeSince(v.(highPrecisionTime)).Round(time.Second))) + return true + }) + slices.Sort(list) + return list +} + +// stopAlarm turns off the alarm. +func (m *M) stopAlarm() { + if *timeout > 0 { + m.timer.Stop() + } +} + +func parseCpuList() { + for _, val := range strings.Split(*cpuListStr, ",") { + val = strings.TrimSpace(val) + if val == "" { + continue + } + cpu, err := strconv.Atoi(val) + if err != nil || cpu <= 0 { + fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val) + os.Exit(1) + } + cpuList = append(cpuList, cpu) + } + if cpuList == nil { + cpuList = append(cpuList, runtime.GOMAXPROCS(-1)) + } +} + +func shouldFailFast() bool { + return *failFast && numFailed.Load() > 0 +} diff --git a/testing/testing/testing_other.go b/testing/testing/testing_other.go new file mode 100644 index 0000000..f91e3b4 --- /dev/null +++ b/testing/testing/testing_other.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package testing + +import "time" + +// isWindowsRetryable reports whether err is a Windows error code +// that may be fixed by retrying a failed filesystem operation. +func isWindowsRetryable(err error) bool { + return false +} + +// highPrecisionTime represents a single point in time. +// On all systems except Windows, using time.Time is fine. +type highPrecisionTime struct { + now time.Time +} + +// highPrecisionTimeNow returns high precision time for benchmarking. +func highPrecisionTimeNow() highPrecisionTime { + return highPrecisionTime{now: time.Now()} +} + +// highPrecisionTimeSince returns duration since b. +func highPrecisionTimeSince(b highPrecisionTime) time.Duration { + return time.Since(b.now) +} diff --git a/testing/testing/testing_test.go b/testing/testing/testing_test.go new file mode 100644 index 0000000..d414a2a --- /dev/null +++ b/testing/testing/testing_test.go @@ -0,0 +1,1032 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package testing_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "slices" + "strings" + "sync" + "testing" + "time" + + "github.com/CodSpeedHQ/codspeed-go/testing/internal/race" + "github.com/CodSpeedHQ/codspeed-go/testing/internal/testenv" +) + +// This is exactly what a test would do without a TestMain. +// It's here only so that there is at least one package in the +// standard library with a TestMain, so that code is executed. + +func TestMain(m *testing.M) { + if os.Getenv("GO_WANT_RACE_BEFORE_TESTS") == "1" { + doRace() + } + + m.Run() + + // Note: m.Run currently prints the final "PASS" line, so if any race is + // reported here (after m.Run but before the process exits), it will print + // "PASS", then print the stack traces for the race, then exit with nonzero + // status. + // + // This is a somewhat fundamental race: because the race detector hooks into + // the runtime at a very low level, no matter where we put the printing it + // would be possible to report a race that occurs afterward. 
However, we could
+	// theoretically move the printing after TestMain, which would at least do a
+	// better job of diagnosing races in cleanup functions within TestMain itself.
+}
+
+func TestTempDirInCleanup(t *testing.T) {
+	var dir string
+
+	t.Run("test", func(t *testing.T) {
+		t.Cleanup(func() {
+			dir = t.TempDir()
+		})
+		_ = t.TempDir()
+	})
+
+	fi, err := os.Stat(dir)
+	if fi != nil {
+		t.Fatalf("Directory %q from user Cleanup still exists", dir)
+	}
+	if !os.IsNotExist(err) {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+}
+
+func TestTempDirInBenchmark(t *testing.T) {
+	testing.Benchmark(func(b *testing.B) {
+		if !b.Run("test", func(b *testing.B) {
+			// Add a loop so that the test won't fail. See issue 38677.
+			for i := 0; i < b.N; i++ {
+				_ = b.TempDir()
+			}
+		}) {
+			t.Fatal("Subtest failure in a benchmark")
+		}
+	})
+}
+
+func TestTempDir(t *testing.T) {
+	testTempDir(t)
+	t.Run("InSubtest", testTempDir)
+	t.Run("test/subtest", testTempDir)
+	t.Run("test\\subtest", testTempDir)
+	t.Run("test:subtest", testTempDir)
+	t.Run("test/..", testTempDir)
+	t.Run("../test", testTempDir)
+	t.Run("test[]", testTempDir)
+	t.Run("test*", testTempDir)
+	t.Run("äöüéè", testTempDir)
+}
+
+func testTempDir(t *testing.T) {
+	dirCh := make(chan string, 1)
+	t.Cleanup(func() {
+		// Verify directory has been removed.
+		select {
+		case dir := <-dirCh:
+			fi, err := os.Stat(dir)
+			if os.IsNotExist(err) {
+				// All good
+				return
+			}
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Errorf("directory %q still exists: %v, isDir=%v", dir, fi, fi.IsDir())
+		default:
+			if !t.Failed() {
+				t.Fatal("never received dir channel")
+			}
+		}
+	})
+
+	dir := t.TempDir()
+	if dir == "" {
+		t.Fatal("expected dir")
+	}
+	dir2 := t.TempDir()
+	if dir == dir2 {
+		t.Fatal("subsequent calls to TempDir returned the same directory")
+	}
+	if filepath.Dir(dir) != filepath.Dir(dir2) {
+		t.Fatalf("calls to TempDir do not share a parent; got %q, %q", dir, dir2)
+	}
+	dirCh <- dir
+	fi, err := os.Stat(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !fi.IsDir() {
+		t.Errorf("dir %q is not a dir", dir)
+	}
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(files) > 0 {
+		t.Errorf("unexpected %d files in TempDir: %v", len(files), files)
+	}
+
+	glob := filepath.Join(dir, "*.txt")
+	if _, err := filepath.Glob(glob); err != nil {
+		t.Error(err)
+	}
+}
+
+func TestSetenv(t *testing.T) {
+	tests := []struct {
+		name               string
+		key                string
+		initialValueExists bool
+		initialValue       string
+		newValue           string
+	}{
+		{
+			name:               "initial value exists",
+			key:                "GO_TEST_KEY_1",
+			initialValueExists: true,
+			initialValue:       "111",
+			newValue:           "222",
+		},
+		{
+			name:               "initial value exists but empty",
+			key:                "GO_TEST_KEY_2",
+			initialValueExists: true,
+			initialValue:       "",
+			newValue:           "222",
+		},
+		{
+			name:               "initial value does not exist",
+			key:                "GO_TEST_KEY_3",
+			initialValueExists: false,
+			initialValue:       "",
+			newValue:           "222",
+		},
+	}
+
+	for _, test := range tests {
+		if test.initialValueExists {
+			if err := os.Setenv(test.key, test.initialValue); err != nil {
+				t.Fatalf("unable to set env: got %v", err)
+			}
+		} else {
+			os.Unsetenv(test.key)
+		}
+
+		t.Run(test.name, func(t *testing.T) {
+			t.Setenv(test.key, test.newValue)
+			if os.Getenv(test.key) != test.newValue {
+				t.Fatalf("unexpected value after t.Setenv: got %s, want %s", os.Getenv(test.key), test.newValue)
+			}
+		})
+
+		got, exists := os.LookupEnv(test.key)
+		if got != test.initialValue {
+			t.Fatalf("unexpected value after t.Setenv cleanup: got %s, want %s", got, test.initialValue)
+ } + if exists != test.initialValueExists { + t.Fatalf("unexpected value after t.Setenv cleanup: got %t, want %t", exists, test.initialValueExists) + } + } +} + +func expectParallelConflict(t *testing.T) { + want := testing.ParallelConflict + if got := recover(); got != want { + t.Fatalf("expected panic; got %#v want %q", got, want) + } +} + +func testWithParallelAfter(t *testing.T, fn func(*testing.T)) { + defer expectParallelConflict(t) + + fn(t) + t.Parallel() +} + +func testWithParallelBefore(t *testing.T, fn func(*testing.T)) { + defer expectParallelConflict(t) + + t.Parallel() + fn(t) +} + +func testWithParallelParentBefore(t *testing.T, fn func(*testing.T)) { + t.Parallel() + + t.Run("child", func(t *testing.T) { + defer expectParallelConflict(t) + + fn(t) + }) +} + +func testWithParallelGrandParentBefore(t *testing.T, fn func(*testing.T)) { + t.Parallel() + + t.Run("child", func(t *testing.T) { + t.Run("grand-child", func(t *testing.T) { + defer expectParallelConflict(t) + + fn(t) + }) + }) +} + +func tSetenv(t *testing.T) { + t.Setenv("GO_TEST_KEY_1", "value") +} + +func TestSetenvWithParallelAfter(t *testing.T) { + testWithParallelAfter(t, tSetenv) +} + +func TestSetenvWithParallelBefore(t *testing.T) { + testWithParallelBefore(t, tSetenv) +} + +func TestSetenvWithParallelParentBefore(t *testing.T) { + testWithParallelParentBefore(t, tSetenv) +} + +func TestSetenvWithParallelGrandParentBefore(t *testing.T) { + testWithParallelGrandParentBefore(t, tSetenv) +} + +func tChdir(t *testing.T) { + t.Chdir(t.TempDir()) +} + +func TestChdirWithParallelAfter(t *testing.T) { + testWithParallelAfter(t, tChdir) +} + +func TestChdirWithParallelBefore(t *testing.T) { + testWithParallelBefore(t, tChdir) +} + +func TestChdirWithParallelParentBefore(t *testing.T) { + testWithParallelParentBefore(t, tChdir) +} + +func TestChdirWithParallelGrandParentBefore(t *testing.T) { + testWithParallelGrandParentBefore(t, tChdir) +} + +func TestChdir(t *testing.T) { + oldDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + defer os.Chdir(oldDir) + + // The "relative" test case relies on tmp not being a symlink. + tmp, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + rel, err := filepath.Rel(oldDir, tmp) + if err != nil { + // If GOROOT is on C: volume and tmp is on the D: volume, there + // is no relative path between them, so skip that test case. + rel = "skip" + } + + for _, tc := range []struct { + name, dir, pwd string + extraChdir bool + }{ + { + name: "absolute", + dir: tmp, + pwd: tmp, + }, + { + name: "relative", + dir: rel, + pwd: tmp, + }, + { + name: "current (absolute)", + dir: oldDir, + pwd: oldDir, + }, + { + name: "current (relative) with extra os.Chdir", + dir: ".", + pwd: oldDir, + + extraChdir: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + if tc.dir == "skip" { + t.Skipf("skipping test because there is no relative path between %s and %s", oldDir, tmp) + } + if !filepath.IsAbs(tc.pwd) { + t.Fatalf("Bad tc.pwd: %q (must be absolute)", tc.pwd) + } + + t.Chdir(tc.dir) + + newDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if newDir != tc.pwd { + t.Fatalf("failed to chdir to %q: getwd: got %q, want %q", tc.dir, newDir, tc.pwd) + } + + switch runtime.GOOS { + case "windows", "plan9": + // Windows and Plan 9 do not use the PWD variable. 
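+				// There is nothing to check on those platforms.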
+			default:
+				if pwd := os.Getenv("PWD"); pwd != tc.pwd {
+					t.Fatalf("PWD: got %q, want %q", pwd, tc.pwd)
+				}
+			}
+
+			if tc.extraChdir {
+				os.Chdir("..")
+			}
+		})
+
+		newDir, err := os.Getwd()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if newDir != oldDir {
+			t.Fatalf("failed to restore wd to %s: getwd: %s", oldDir, newDir)
+		}
+	}
+}
+
+// testingTrueInInit is part of TestTesting.
+var testingTrueInInit = false
+
+// testingTrueInPackageVarInit is part of TestTesting.
+var testingTrueInPackageVarInit = testing.Testing()
+
+// init is part of TestTesting.
+func init() {
+	if testing.Testing() {
+		testingTrueInInit = true
+	}
+}
+
+var testingProg = `
+package main
+
+import (
+	"fmt"
+	"testing"
+)
+
+func main() {
+	fmt.Println(testing.Testing())
+}
+`
+
+func TestTesting(t *testing.T) {
+	if !testing.Testing() {
+		t.Errorf("testing.Testing() == %t, want %t", testing.Testing(), true)
+	}
+	if !testingTrueInInit {
+		t.Errorf("testing.Testing() called by init function == %t, want %t", testingTrueInInit, true)
+	}
+	if !testingTrueInPackageVarInit {
+		t.Errorf("testing.Testing() variable initialized as %t, want %t", testingTrueInPackageVarInit, true)
+	}
+
+	if testing.Short() {
+		t.Skip("skipping building a binary in short mode")
+	}
+	testenv.MustHaveGoRun(t)
+
+	fn := filepath.Join(t.TempDir(), "x.go")
+	if err := os.WriteFile(fn, []byte(testingProg), 0644); err != nil {
+		t.Fatal(err)
+	}
+
+	cmd := testenv.Command(t, testenv.GoToolPath(t), "run", fn)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		t.Fatalf("%v failed: %v\n%s", cmd, err, out)
+	}
+
+	s := string(bytes.TrimSpace(out))
+	if s != "false" {
+		t.Errorf("testing.Testing() in a non-test binary returned %q, want %q", s, "false")
+	}
+}
+
+// runTest runs a helper test with -test.v, ignoring its exit status.
+// runTest both logs and returns the test output.
+func runTest(t *testing.T, test string) []byte {
+	t.Helper()
+
+	testenv.MustHaveExec(t)
+
+	cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
+	out, err := cmd.CombinedOutput()
+	t.Logf("%v: %v\n%s", cmd, err, out)
+
+	return out
+}
+
+// doRace provokes a data race that generates a race detector report if run
+// under the race detector and is otherwise benign.
+func doRace() {
+	var x int
+	c1 := make(chan bool)
+	go func() {
+		x = 1 // racy write
+		c1 <- true
+	}()
+	_ = x // racy read
+	<-c1
+}
+
+func TestRaceReports(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		// Generate a race detector report in a subtest.
+		t.Run("Sub", func(t *testing.T) {
+			doRace()
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceReports")
+
+	// We should see at most one race detector report.
+	c := bytes.Count(out, []byte("race detected"))
+	want := 0
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+// Issue #60083. This used to fail on the race builder.
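+// The failure was a race report printed under an "=== NAME" header with no
+// test name following it; the regexp below checks that no such header appears.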
+func TestRaceName(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		doRace()
+		return
+	}
+
+	out := runTest(t, "TestRaceName")
+
+	if regexp.MustCompile(`=== NAME\s*$`).Match(out) {
+		t.Errorf("incorrectly reported test with no name")
+	}
+}
+
+func TestRaceSubReports(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Parallel()
+		c1 := make(chan bool, 1)
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub1", func(t *testing.T) {
+				t.Parallel()
+				doRace()
+				c1 <- true
+			})
+			t.Run("subsub2", func(t *testing.T) {
+				t.Parallel()
+				doRace()
+				<-c1
+			})
+		})
+		doRace()
+		return
+	}
+
+	out := runTest(t, "TestRaceSubReports")
+
+	// There should be three race reports: one for each subtest, and one for the
+	// race after the subtests complete. Note that because the subtests run in
+	// parallel, the race stacks may both be printed with one or the other
+	// test's logs.
+	cReport := bytes.Count(out, []byte("race detected during execution of test"))
+	wantReport := 0
+	if race.Enabled {
+		wantReport = 3
+	}
+	if cReport != wantReport {
+		t.Errorf("got %d race reports, want %d", cReport, wantReport)
+	}
+
+	// Regardless of when the stacks are printed, we expect each subtest to be
+	// marked as failed, and that failure should propagate up to the parents.
+	cFail := bytes.Count(out, []byte("--- FAIL:"))
+	wantFail := 0
+	if race.Enabled {
+		wantFail = 4
+	}
+	if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+	}
+}
+
+func TestRaceInCleanup(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Cleanup(doRace)
+		t.Parallel()
+		t.Run("sub", func(t *testing.T) {
+			t.Parallel()
+			// No race should be reported for sub.
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceInCleanup")
+
+	// There should be one race report, for the parent test only.
+	cReport := bytes.Count(out, []byte("race detected during execution of test"))
+	wantReport := 0
+	if race.Enabled {
+		wantReport = 1
+	}
+	if cReport != wantReport {
+		t.Errorf("got %d race reports, want %d", cReport, wantReport)
+	}
+
+	// Only the parent test should be marked as failed.
+	// (The subtest does not race, and should pass.)
+	cFail := bytes.Count(out, []byte("--- FAIL:"))
+	wantFail := 0
+	if race.Enabled {
+		wantFail = 1
+	}
+	if cFail != wantFail {
+		t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
+	}
+}
+
+func TestDeepSubtestRace(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub", func(t *testing.T) {
+				t.Run("subsubsub", func(t *testing.T) {
+					doRace()
+				})
+			})
+			doRace()
+		})
+		return
+	}
+
+	out := runTest(t, "TestDeepSubtestRace")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// There should be two race reports.
+	if race.Enabled {
+		want = 2
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceDuringParallelFailsAllSubtests(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		var ready sync.WaitGroup
+		ready.Add(2)
+		done := make(chan struct{})
+		go func() {
+			ready.Wait()
+			doRace() // This race happens while both subtests are running.
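+			// Closing done then releases both subtests, so each of them
+			// should report the race independently.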
+			close(done)
+		}()
+
+		t.Run("sub", func(t *testing.T) {
+			t.Run("subsub1", func(t *testing.T) {
+				t.Parallel()
+				ready.Done()
+				<-done
+			})
+			t.Run("subsub2", func(t *testing.T) {
+				t.Parallel()
+				ready.Done()
+				<-done
+			})
+		})
+
+		return
+	}
+
+	out := runTest(t, "TestRaceDuringParallelFailsAllSubtests")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// Each subtest should report the race independently.
+	if race.Enabled {
+		want = 2
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceBeforeParallel(t *testing.T) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
+		t.Run("sub", func(t *testing.T) {
+			doRace()
+			t.Parallel()
+		})
+		return
+	}
+
+	out := runTest(t, "TestRaceBeforeParallel")
+
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+	want := 0
+	// We should see one race detector report.
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports, want %d", c, want)
+	}
+}
+
+func TestRaceBeforeTests(t *testing.T) {
+	cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^$")
+	cmd = testenv.CleanCmdEnv(cmd)
+	cmd.Env = append(cmd.Env, "GO_WANT_RACE_BEFORE_TESTS=1")
+	out, _ := cmd.CombinedOutput()
+	t.Logf("%s", out)
+
+	c := bytes.Count(out, []byte("race detected outside of test execution"))
+
+	want := 0
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func TestBenchmarkRace(t *testing.T) {
+	out := runTest(t, "BenchmarkRacy")
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+
+	want := 0
+	// We should see one race detector report.
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func TestBenchmarkRaceBLoop(t *testing.T) {
+	out := runTest(t, "BenchmarkBLoopRacy")
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+
+	want := 0
+	// We should see one race detector report.
+	if race.Enabled {
+		want = 1
+	}
+	if c != want {
+		t.Errorf("got %d race reports; want %d", c, want)
+	}
+}
+
+func BenchmarkRacy(b *testing.B) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		b.Skipf("skipping intentionally-racy benchmark")
+	}
+	for i := 0; i < b.N; i++ {
+		doRace()
+	}
+}
+
+func BenchmarkBLoopRacy(b *testing.B) {
+	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+		b.Skipf("skipping intentionally-racy benchmark")
+	}
+	for b.Loop() {
+		doRace()
+	}
+}
+
+func TestBenchmarkSubRace(t *testing.T) {
+	out := runTest(t, "BenchmarkSubRacy")
+	c := bytes.Count(out, []byte("race detected during execution of test"))
+
+	want := 0
+	// We should see 3 race detector reports:
+	// one in the sub-benchmark, one in the parent afterward,
+	// and one in b.Loop.
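+	// ("racy" races inside its b.N loop, "racy-bLoop" races inside its b.Loop
+	// loop, and the parent calls doRace again after the sub-benchmarks return.)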
+ if race.Enabled { + want = 3 + } + if c != want { + t.Errorf("got %d race reports; want %d", c, want) + } +} + +func BenchmarkSubRacy(b *testing.B) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + b.Skipf("skipping intentionally-racy benchmark") + } + + b.Run("non-racy", func(b *testing.B) { + tot := 0 + for i := 0; i < b.N; i++ { + tot++ + } + _ = tot + }) + + b.Run("racy", func(b *testing.B) { + for i := 0; i < b.N; i++ { + doRace() + } + }) + + b.Run("racy-bLoop", func(b *testing.B) { + for b.Loop() { + doRace() + } + }) + + doRace() // should be reported separately +} + +func TestRunningTests(t *testing.T) { + t.Parallel() + + // Regression test for https://go.dev/issue/64404: + // on timeout, the "running tests" message should not include + // tests that are waiting on parked subtests. + + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + for i := 0; i < 2; i++ { + t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) { + t.Parallel() + for j := 0; j < 2; j++ { + t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) { + t.Parallel() + for { + time.Sleep(1 * time.Millisecond) + } + }) + } + }) + } + } + + timeout := 10 * time.Millisecond + for { + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String(), "-test.parallel=4") + cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1") + out, err := cmd.CombinedOutput() + t.Logf("%v:\n%s", cmd, out) + if _, ok := err.(*exec.ExitError); !ok { + t.Fatal(err) + } + + // Because the outer subtests (and TestRunningTests itself) are marked as + // parallel, their test functions return (and are no longer “running”) + // before the inner subtests are released to run and hang. + // Only those inner subtests should be reported as running. + want := []string{ + "TestRunningTests/outer0/inner0", + "TestRunningTests/outer0/inner1", + "TestRunningTests/outer1/inner0", + "TestRunningTests/outer1/inner1", + } + + got, ok := parseRunningTests(out) + if slices.Equal(got, want) { + break + } + if ok { + t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) + } else { + t.Logf("no running tests found") + } + t.Logf("retrying with longer timeout") + timeout *= 2 + } +} + +func TestRunningTestsInCleanup(t *testing.T) { + t.Parallel() + + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + for i := 0; i < 2; i++ { + t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) { + // Not parallel: we expect to see only one outer test, + // stuck in cleanup after its subtest finishes. + + t.Cleanup(func() { + for { + time.Sleep(1 * time.Millisecond) + } + }) + + for j := 0; j < 2; j++ { + t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) { + t.Parallel() + }) + } + }) + } + } + + timeout := 10 * time.Millisecond + for { + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String()) + cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1") + out, err := cmd.CombinedOutput() + t.Logf("%v:\n%s", cmd, out) + if _, ok := err.(*exec.ExitError); !ok { + t.Fatal(err) + } + + // TestRunningTestsInCleanup is blocked in the call to t.Run, + // but its test function has not yet returned so it should still + // be considered to be running. + // outer1 hasn't even started yet, so only outer0 and the top-level + // test function should be reported as running. 
+		want := []string{
+			"TestRunningTestsInCleanup",
+			"TestRunningTestsInCleanup/outer0",
+		}
+
+		got, ok := parseRunningTests(out)
+		if slices.Equal(got, want) {
+			break
+		}
+		if ok {
+			t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
+		} else {
+			t.Logf("no running tests found")
+		}
+		t.Logf("retrying with longer timeout")
+		timeout *= 2
+	}
+}
+
+func parseRunningTests(out []byte) (runningTests []string, ok bool) {
+	inRunningTests := false
+	for _, line := range strings.Split(string(out), "\n") {
+		if inRunningTests {
+			// Package testing adds one tab, the panic printer adds another.
+			if trimmed, ok := strings.CutPrefix(line, "\t\t"); ok {
+				if name, _, ok := strings.Cut(trimmed, " "); ok {
+					runningTests = append(runningTests, name)
+					continue
+				}
+			}
+
+			// This line is not the name of a running test.
+			return runningTests, true
+		}
+
+		if strings.TrimSpace(line) == "running tests:" {
+			inRunningTests = true
+		}
+	}
+
+	return nil, false
+}
+
+func TestConcurrentRun(t *testing.T) {
+	// Regression test for https://go.dev/issue/64402:
+	// this deadlocked after https://go.dev/cl/506755.
+
+	block := make(chan struct{})
+	var ready, done sync.WaitGroup
+	for i := 0; i < 2; i++ {
+		ready.Add(1)
+		done.Add(1)
+		go t.Run("", func(*testing.T) {
+			ready.Done()
+			<-block
+			done.Done()
+		})
+	}
+	ready.Wait()
+	close(block)
+	done.Wait()
+}
+
+func TestParentRun(t1 *testing.T) {
+	// Regression test for https://go.dev/issue/64402:
+	// this deadlocked after https://go.dev/cl/506755.
+
+	t1.Run("outer", func(t2 *testing.T) {
+		t2.Log("Hello outer!")
+		t1.Run("not_inner", func(t3 *testing.T) { // Note: this is t1.Run, not t2.Run.
+			t3.Log("Hello inner!")
+		})
+	})
+}
+
+func TestContext(t *testing.T) {
+	ctx := t.Context()
+	if err := ctx.Err(); err != nil {
+		t.Fatalf("expected non-canceled context, got %v", err)
+	}
+
+	var innerCtx context.Context
+	t.Run("inner", func(t *testing.T) {
+		innerCtx = t.Context()
+		if err := innerCtx.Err(); err != nil {
+			t.Fatalf("expected inner test not to inherit canceled context, got %v", err)
+		}
+	})
+	t.Run("inner2", func(t *testing.T) {
+		if !errors.Is(innerCtx.Err(), context.Canceled) {
+			t.Fatal("expected context of sibling test to be canceled after its test function finished")
+		}
+	})
+
+	t.Cleanup(func() {
+		if !errors.Is(ctx.Err(), context.Canceled) {
+			t.Fatal("expected context canceled before cleanup")
+		}
+	})
+}
+
+func TestBenchmarkBLoopIterationCorrect(t *testing.T) {
+	out := runTest(t, "BenchmarkBLoopPrint")
+	c := bytes.Count(out, []byte("Printing from BenchmarkBLoopPrint"))
+
+	want := 2
+	if c != want {
+		t.Errorf("got %d loop iterations; want %d", c, want)
+	}
+
+	// b.Loop() only ramps up once.
+	c = bytes.Count(out, []byte("Ramping up from BenchmarkBLoopPrint"))
+	want = 1
+	if c != want {
+		t.Errorf("got %d loop ramp-ups; want %d", c, want)
+	}
+
+	re := regexp.MustCompile(`BenchmarkBLoopPrint(-[0-9]+)?\s+2\s+[0-9]+\s+ns/op`)
+	if !re.Match(out) {
+		t.Error("missing benchmark output")
+	}
+}
+
+func TestBenchmarkBNIterationCorrect(t *testing.T) {
+	out := runTest(t, "BenchmarkBNPrint")
+	c := bytes.Count(out, []byte("Printing from BenchmarkBNPrint"))
+
+	// runTest sets benchtime=2x; with the b.N semantics specified in #32051,
+	// the loop body should run 3 times.
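+	// (One iteration comes from the initial run1 probe with b.N=1, and two more
+	// from the measured launch run with b.N=2.)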
+	want := 3
+	if c != want {
+		t.Errorf("got %d loop iterations; want %d", c, want)
+	}
+
+	// A b.N-style fixed-iteration loop ramps up twice:
+	// once in run1() and once in launch().
+	c = bytes.Count(out, []byte("Ramping up from BenchmarkBNPrint"))
+	want = 2
+	if c != want {
+		t.Errorf("got %d loop ramp-ups; want %d", c, want)
+	}
+}
+
+func BenchmarkBLoopPrint(b *testing.B) {
+	b.Logf("Ramping up from BenchmarkBLoopPrint")
+	for b.Loop() {
+		b.Logf("Printing from BenchmarkBLoopPrint")
+	}
+}
+
+func BenchmarkBNPrint(b *testing.B) {
+	b.Logf("Ramping up from BenchmarkBNPrint")
+	for i := 0; i < b.N; i++ {
+		b.Logf("Printing from BenchmarkBNPrint")
+	}
+}
diff --git a/testing/testing/testing_windows.go b/testing/testing/testing_windows.go
new file mode 100644
index 0000000..3b876c9
--- /dev/null
+++ b/testing/testing/testing_windows.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package testing
+
+import (
+	"errors"
+	"math/bits"
+	"syscall"
+	"time"
+
+	"github.com/CodSpeedHQ/codspeed-go/testing/internal/syscall/windows"
+)
+
+// isWindowsRetryable reports whether err is a Windows error code
+// that may be fixed by retrying a failed filesystem operation.
+func isWindowsRetryable(err error) bool {
+	for {
+		unwrapped := errors.Unwrap(err)
+		if unwrapped == nil {
+			break
+		}
+		err = unwrapped
+	}
+	if err == syscall.ERROR_ACCESS_DENIED {
+		return true // Observed in https://go.dev/issue/50051.
+	}
+	if err == windows.ERROR_SHARING_VIOLATION {
+		return true // Observed in https://go.dev/issue/51442.
+	}
+	return false
+}
+
+// highPrecisionTime represents a single point in time with query performance counter.
+// time.Time on Windows has low system granularity, which is not suitable for
+// measuring short time intervals.
+//
+// TODO: If Windows runtime implements high resolution timing then highPrecisionTime
+// can be removed.
+type highPrecisionTime struct {
+	now int64
+}
+
+// highPrecisionTimeNow returns high precision time for benchmarking.
+func highPrecisionTimeNow() highPrecisionTime {
+	var t highPrecisionTime
+	// This should always succeed for Windows XP and above.
+	t.now = windows.QueryPerformanceCounter()
+	return t
+}
+
+func (a highPrecisionTime) sub(b highPrecisionTime) time.Duration {
+	delta := a.now - b.now
+
+	if queryPerformanceFrequency == 0 {
+		queryPerformanceFrequency = windows.QueryPerformanceFrequency()
+	}
+	hi, lo := bits.Mul64(uint64(delta), uint64(time.Second)/uint64(time.Nanosecond))
+	quo, _ := bits.Div64(hi, lo, uint64(queryPerformanceFrequency))
+	return time.Duration(quo)
+}
+
+var queryPerformanceFrequency int64
+
+// highPrecisionTimeSince returns duration since a.
+func highPrecisionTimeSince(a highPrecisionTime) time.Duration {
+	return highPrecisionTimeNow().sub(a)
+}
diff --git a/testing/testing/testing_windows_test.go b/testing/testing/testing_windows_test.go
new file mode 100644
index 0000000..e75232d
--- /dev/null
+++ b/testing/testing/testing_windows_test.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package testing_test + +import ( + "testing" + "time" +) + +var sink time.Time +var sinkHPT testing.HighPrecisionTime + +func BenchmarkTimeNow(b *testing.B) { + for i := 0; i < b.N; i++ { + sink = time.Now() + } +} + +func BenchmarkHighPrecisionTimeNow(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkHPT = testing.HighPrecisionTimeNow() + } +}