Update to go1.24.0
@@ -5,6 +5,7 @@
package testing

import (
	"context"
	"flag"
	"fmt"
	"internal/sysinfo"

@@ -78,7 +79,7 @@ type InternalBenchmark struct {
}

// B is a type passed to [Benchmark] functions to manage benchmark
// timing and to specify the number of iterations to run.
// timing and control the number of iterations.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
@@ -93,7 +94,7 @@ type InternalBenchmark struct {
type B struct {
	common
	importPath string // import path of the package containing the benchmark
	context *benchContext
	bstate *benchState
	N int
	previousN int // number of iterations in the previous run
	previousDuration time.Duration // total duration of the previous run
@@ -113,6 +114,10 @@ type B struct {
	netBytes uint64
	// Extra metrics collected by ReportMetric.
	extra map[string]float64
	// For Loop() to be executed in benchFunc.
	// Loop() has its own control logic that skips the loop scaling.
	// See issue #61515.
	loopN int
}

// StartTimer starts timing a test. This function is called automatically
@@ -129,8 +134,7 @@ func (b *B) StartTimer() {
}

// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
// while performing steps that you don't want to measure.
func (b *B) StopTimer() {
	if b.timerOn {
		b.duration += highPrecisionTimeSince(b.start)
@@ -178,6 +182,7 @@ func (b *B) ReportAllocs() {
func (b *B) runN(n int) {
	benchmarkLock.Lock()
	defer benchmarkLock.Unlock()
	ctx, cancelCtx := context.WithCancel(context.Background())
	defer func() {
		b.runCleanup(normalPanic)
		b.checkRaces()
@@ -187,6 +192,10 @@ func (b *B) runN(n int) {
	runtime.GC()
	b.resetRaces()
	b.N = n
	b.loopN = 0
	b.ctx = ctx
	b.cancelCtx = cancelCtx

	b.parallelism = 1
	b.ResetTimer()
	b.StartTimer()
@@ -199,10 +208,10 @@ func (b *B) runN(n int) {
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmark should be run.
func (b *B) run1() bool {
	if ctx := b.context; ctx != nil {
	if bstate := b.bstate; bstate != nil {
		// Extend maxLen, if needed.
		if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen {
			ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
		if n := len(b.name) + bstate.extLen + 1; n > bstate.maxLen {
			bstate.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
		}
	}
	go func() {
@@ -253,9 +262,9 @@ func (b *B) run() {
			fmt.Fprintf(b.w, "cpu: %s\n", cpu)
		}
	})
	if b.context != nil {
	if b.bstate != nil {
		// Running go test --test.bench
		b.context.processBench(b) // Must call doBench.
		b.bstate.processBench(b) // Must call doBench.
	} else {
		// Running func Benchmark.
		b.doBench()
@@ -268,6 +277,29 @@ func (b *B) doBench() BenchmarkResult {
	return b.result
}

func predictN(goalns int64, prevIters int64, prevns int64, last int64) int {
	if prevns == 0 {
		// Round up to dodge divide by zero. See https://go.dev/issue/70709.
		prevns = 1
	}

	// Order of operations matters.
	// For very fast benchmarks, prevIters ~= prevns.
	// If you divide first, you get 0 or 1,
	// which can hide an order of magnitude in execution time.
	// So multiply first, then divide.
	n := goalns * prevIters / prevns
	// Run more iterations than we think we'll need (1.2x).
	n += n / 5
	// Don't grow too fast in case we had timing errors previously.
	n = min(n, 100*last)
	// Be sure to run at least one more than last time.
	n = max(n, last+1)
	// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
	n = min(n, 1e9)
	return int(n)
}
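
As a worked check of the arithmetic above (a standalone sketch, not part of the change): suppose the goal is one second and the previous run did 100 iterations in 1ms. Multiplying first gives 1e9*100/1e6 = 100000 predicted iterations, 120000 after the 1.2x slack, but the 100*last growth cap brings it back down to 10000.

	package main

	import "fmt"

	// predict mirrors predictN's math: multiply before dividing, add 20%
	// slack, cap growth at 100x the previous run, always grow by at least
	// one iteration, and never exceed 1e9.
	func predict(goalns, prevIters, prevns, last int64) int {
		if prevns == 0 {
			prevns = 1 // avoid dividing by zero
		}
		n := goalns * prevIters / prevns // multiply first to keep precision
		n += n / 5                       // 1.2x slack
		n = min(n, 100*last)             // limit growth
		n = max(n, last+1)               // always make progress
		n = min(n, 1e9)                  // stay in int range on 32-bit
		return int(n)
	}

	func main() {
		fmt.Println(predict(1e9, 100, 1e6, 100)) // 10000: the growth cap wins
	}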

// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// launch is run by the doBench function as a separate goroutine.
@@ -279,41 +311,27 @@ func (b *B) launch() {
		b.signal <- true
	}()

	// Run the benchmark for at least the specified amount of time.
	if b.benchTime.n > 0 {
		// We already ran a single iteration in run1.
		// If -benchtime=1x was requested, use that result.
		// See https://golang.org/issue/32051.
		if b.benchTime.n > 1 {
			b.runN(b.benchTime.n)
		}
	} else {
		d := b.benchTime.d
		for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
			last := n
			// Predict required iterations.
			goalns := d.Nanoseconds()
			prevIters := int64(b.N)
			prevns := b.duration.Nanoseconds()
			if prevns <= 0 {
				// Round up, to avoid div by zero.
				prevns = 1
	// b.Loop does its own ramp-up logic so we just need to run it once.
	// If b.loopN is non-zero, it means b.Loop has already run.
	if b.loopN == 0 {
		// Run the benchmark for at least the specified amount of time.
		if b.benchTime.n > 0 {
			// We already ran a single iteration in run1.
			// If -benchtime=1x was requested, use that result.
			// See https://golang.org/issue/32051.
			if b.benchTime.n > 1 {
				b.runN(b.benchTime.n)
			}
		} else {
			d := b.benchTime.d
			for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
				last := n
				// Predict required iterations.
				goalns := d.Nanoseconds()
				prevIters := int64(b.N)
				n = int64(predictN(goalns, prevIters, b.duration.Nanoseconds(), last))
				b.runN(int(n))
			}
			// Order of operations matters.
			// For very fast benchmarks, prevIters ~= prevns.
			// If you divide first, you get 0 or 1,
			// which can hide an order of magnitude in execution time.
			// So multiply first, then divide.
			n = goalns * prevIters / prevns
			// Run more iterations than we think we'll need (1.2x).
			n += n / 5
			// Don't grow too fast in case we had timing errors previously.
			n = min(n, 100*last)
			// Be sure to run at least one more than last time.
			n = max(n, last+1)
			// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
			n = min(n, 1e9)
			b.runN(int(n))
		}
	}
	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
@@ -349,6 +367,86 @@ func (b *B) ReportMetric(n float64, unit string) {
	b.extra[unit] = n
}

func (b *B) stopOrScaleBLoop() bool {
	timeElapsed := highPrecisionTimeSince(b.start)
	if timeElapsed >= b.benchTime.d {
		// Stop the timer so we don't count cleanup time
		b.StopTimer()
		return false
	}
	// Loop scaling
	goalns := b.benchTime.d.Nanoseconds()
	prevIters := int64(b.N)
	b.N = predictN(goalns, prevIters, timeElapsed.Nanoseconds(), prevIters)
	b.loopN++
	return true
}

func (b *B) loopSlowPath() bool {
	if b.loopN == 0 {
		// If it's the first call to b.Loop() in the benchmark function.
		// Allows more precise measurement of benchmark loop cost counts.
		// Also initialize b.N to 1 to kick start loop scaling.
		b.N = 1
		b.loopN = 1
		b.ResetTimer()
		return true
	}
	// Handles fixed iterations case
	if b.benchTime.n > 0 {
		if b.N < b.benchTime.n {
			b.N = b.benchTime.n
			b.loopN++
			return true
		}
		b.StopTimer()
		return false
	}
	// Handles fixed time case
	return b.stopOrScaleBLoop()
}

// Loop returns true as long as the benchmark should continue running.
//
// A typical benchmark is structured like:
//
//	func Benchmark(b *testing.B) {
//		... setup ...
//		for b.Loop() {
//			... code to measure ...
//		}
//		... cleanup ...
//	}
//
// Loop resets the benchmark timer the first time it is called in a benchmark,
// so any setup performed prior to starting the benchmark loop does not count
// toward the benchmark measurement. Likewise, when it returns false, it stops
// the timer so cleanup code is not measured.
//
// The compiler never optimizes away calls to functions within the body of a
// "for b.Loop() { ... }" loop. This prevents surprises that can otherwise occur
// if the compiler determines that the result of a benchmarked function is
// unused. The loop must be written in exactly this form, and this only applies
// to calls syntactically between the curly braces of the loop. Optimizations
// are performed as usual in any functions called by the loop.
//
// After Loop returns false, b.N contains the total number of iterations that
// ran, so the benchmark may use b.N to compute other average metrics.
//
// Prior to the introduction of Loop, benchmarks were expected to contain an
// explicit loop from 0 to b.N. Benchmarks should either use Loop or contain a
// loop to b.N, but not both. Loop offers more automatic management of the
// benchmark timer, and runs each benchmark function only once per measurement,
// whereas b.N-based benchmarks must run the benchmark function (and any
// associated setup and cleanup) several times.
func (b *B) Loop() bool {
	if b.loopN != 0 && b.loopN < b.N {
		b.loopN++
		return true
	}
	return b.loopSlowPath()
}
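
To illustrate the doc comment's point about b.N after the loop, a minimal sketch (the benchmark name and the counted quantity are illustrative, not from this change): a counter accumulated inside the loop can be averaged per iteration once Loop returns false, in the same style as the existing ExampleB_ReportMetric.

	func BenchmarkWork(b *testing.B) {
		var processed int
		for b.Loop() {
			processed += len("example input") // stand-in for the measured work
		}
		// Loop has returned false, so b.N holds the final iteration count.
		b.ReportMetric(float64(processed)/float64(b.N), "items/op")
	}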

// BenchmarkResult contains the results of a benchmark run.
type BenchmarkResult struct {
	N int // The number of iterations.
@@ -492,7 +590,7 @@ func benchmarkName(name string, n int) string {
	return name
}

type benchContext struct {
type benchState struct {
	match *matcher

	maxLen int // The largest recorded benchmark name.
@@ -517,17 +615,17 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
			maxprocs = procs
		}
	}
	ctx := &benchContext{
	bstate := &benchState{
		match:  newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip),
		extLen: len(benchmarkName("", maxprocs)),
	}
	var bs []InternalBenchmark
	for _, Benchmark := range benchmarks {
		if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched {
		if _, matched, _ := bstate.match.fullName(nil, Benchmark.Name); matched {
			bs = append(bs, Benchmark)
			benchName := benchmarkName(Benchmark.Name, maxprocs)
			if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen {
				ctx.maxLen = l
			if l := len(benchName) + bstate.extLen + 1; l > bstate.maxLen {
				bstate.maxLen = l
			}
		}
	}
@@ -544,7 +642,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
			}
		},
		benchTime: benchTime,
		context: ctx,
		bstate: bstate,
	}
	if Verbose() {
		main.chatty = newChattyPrinter(main.w)
@@ -554,7 +652,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
}

// processBench runs bench b for the configured CPU counts and prints the results.
func (ctx *benchContext) processBench(b *B) {
func (s *benchState) processBench(b *B) {
	for i, procs := range cpuList {
		for j := uint(0); j < *count; j++ {
			runtime.GOMAXPROCS(procs)
@@ -562,7 +660,7 @@ func (ctx *benchContext) processBench(b *B) {

			// If it's chatty, we've already printed this information.
			if b.chatty == nil {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
				fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName)
			}
			// Recompute the running time for all but the first iteration.
			if i > 0 || j > 0 {
@@ -589,7 +687,7 @@ func (ctx *benchContext) processBench(b *B) {
			}
			results := r.String()
			if b.chatty != nil {
				fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
				fmt.Fprintf(b.w, "%-*s\t", s.maxLen, benchName)
			}
			if *benchmarkMemory || b.showAllocResult {
				results += "\t" + r.MemString()
@@ -629,8 +727,8 @@ func (b *B) Run(name string, f func(b *B)) bool {
	defer benchmarkLock.Lock()

	benchName, ok, partial := b.name, true, false
	if b.context != nil {
		benchName, ok, partial = b.context.match.fullName(&b.common, name)
	if b.bstate != nil {
		benchName, ok, partial = b.bstate.match.fullName(&b.common, name)
	}
	if !ok {
		return true
@@ -651,7 +749,7 @@ func (b *B) Run(name string, f func(b *B)) bool {
		importPath: b.importPath,
		benchFunc:  f,
		benchTime:  b.benchTime,
		context: b.context,
		bstate: b.bstate,
	}
	if partial {
		// Partial name match, like -bench=X/Y matching BenchmarkX.

@@ -7,6 +7,8 @@ package testing_test
import (
	"bytes"
	"cmp"
	"context"
	"errors"
	"runtime"
	"slices"
	"strings"
@@ -127,6 +129,34 @@ func TestRunParallelSkipNow(t *testing.T) {
	})
}

func TestBenchmarkContext(t *testing.T) {
	testing.Benchmark(func(b *testing.B) {
		ctx := b.Context()
		if err := ctx.Err(); err != nil {
			b.Fatalf("expected non-canceled context, got %v", err)
		}

		var innerCtx context.Context
		b.Run("inner", func(b *testing.B) {
			innerCtx = b.Context()
			if err := innerCtx.Err(); err != nil {
				b.Fatalf("expected inner benchmark to not inherit canceled context, got %v", err)
			}
		})
		b.Run("inner2", func(b *testing.B) {
			if !errors.Is(innerCtx.Err(), context.Canceled) {
				t.Fatal("expected context of sibling benchmark to be canceled after its test function finished")
			}
		})

		t.Cleanup(func() {
			if !errors.Is(ctx.Err(), context.Canceled) {
				t.Fatal("expected context canceled before cleanup")
			}
		})
	})
}

func ExampleB_RunParallel() {
	// Parallel benchmark for text/template.Template.Execute on a single object.
	testing.Benchmark(func(b *testing.B) {
@@ -167,7 +197,7 @@ func ExampleB_ReportMetric() {
	// specific algorithm (in this case, sorting).
	testing.Benchmark(func(b *testing.B) {
		var compares int64
		for i := 0; i < b.N; i++ {
		for b.Loop() {
			s := []int{5, 4, 3, 2, 1}
			slices.SortFunc(s, func(a, b int) int {
				compares++

@@ -6,6 +6,7 @@ package testing

import (
	"fmt"
	"runtime"
	"slices"
	"strings"
	"time"
@@ -66,6 +67,10 @@ func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Durati
	var fail string
	got := strings.TrimSpace(stdout)
	want := strings.TrimSpace(eg.Output)
	if runtime.GOOS == "windows" {
		got = strings.ReplaceAll(got, "\r\n", "\n")
		want = strings.ReplaceAll(want, "\r\n", "\n")
	}
	if eg.Unordered {
		if sortLines(got) != sortLines(want) && recovered == nil {
			fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", stdout, eg.Output)

src/testing/example_loop_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing_test

import (
	"math/rand/v2"
	"testing"
)

// ExBenchmark shows how to use b.Loop in a benchmark.
//
// (If this were a real benchmark, not an example, this would be named
// BenchmarkSomething.)
func ExBenchmark(b *testing.B) {
	// Generate a large random slice to use as an input.
	// Since this is done before the first call to b.Loop(),
	// it doesn't count toward the benchmark time.
	input := make([]int, 128<<10)
	for i := range input {
		input[i] = rand.Int()
	}

	// Perform the benchmark.
	for b.Loop() {
		// Normally, the compiler would be allowed to optimize away the call
		// to sum because it has no side effects and the result isn't used.
		// However, inside a b.Loop loop, the compiler ensures function calls
		// aren't optimized away.
		sum(input)
	}

	// Outside the loop, the timer is stopped, so we could perform
	// cleanup if necessary without affecting the result.
}

func sum(data []int) int {
	total := 0
	for _, value := range data {
		total += value
	}
	return total
}

func ExampleB_Loop() {
	testing.Benchmark(ExBenchmark)
}
@@ -9,3 +9,5 @@ var PrettyPrint = prettyPrint
type HighPrecisionTime = highPrecisionTime

var HighPrecisionTimeNow = highPrecisionTimeNow

const ParallelConflict = parallelConflict

@@ -28,11 +28,7 @@ func TestFlag(t *testing.T) {
		flag := flag
		t.Run(flag, func(t *testing.T) {
			t.Parallel()
			exe, err := os.Executable()
			if err != nil {
				exe = os.Args[0]
			}
			cmd := exec.Command(exe, "-test.run=^TestFlag$", "-test_flag_arg="+flag)
			cmd := exec.Command(testenv.Executable(t), "-test.run=^TestFlag$", "-test_flag_arg="+flag)
			if flag != "" {
				cmd.Args = append(cmd.Args, flag)
			}

@@ -10,8 +10,8 @@ import (
	"fmt"
	"io"
	"io/fs"
	"maps"
	"path"
	"reflect"
	"slices"
	"strings"
	"testing/iotest"
@@ -72,13 +72,7 @@ func testFS(fsys fs.FS, expected ...string) error {
	}
	delete(found, ".")
	if len(expected) == 0 && len(found) > 0 {
		var list []string
		for k := range found {
			if k != "." {
				list = append(list, k)
			}
		}
		slices.Sort(list)
		list := slices.Sorted(maps.Keys(found))
		if len(list) > 15 {
			list = append(list[:10], "...")
		}
@@ -358,7 +352,7 @@ func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) {
		t.errorf("%s: Glob(%#q): %w", dir, glob, err)
		return
	}
	if reflect.DeepEqual(want, names) {
	if slices.Equal(want, names) {
		return
	}

@@ -576,7 +570,7 @@ func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) {
	}
}

// checkBadPath checks that various invalid forms of file's name cannot be opened using t.fsys.Open.
// checkOpen validates file opening behavior by attempting to open and then close the given file path.
func (t *fsTester) checkOpen(file string) {
	t.checkBadPath(file, "Open", func(file string) error {
		f, err := t.fsys.Open(file)

@@ -5,6 +5,7 @@
package testing

import (
	"context"
	"errors"
	"flag"
	"fmt"
@@ -67,8 +68,8 @@ type InternalFuzzTarget struct {
// that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name.
type F struct {
	common
	fuzzContext *fuzzContext
	testContext *testContext
	fstate *fuzzState
	tstate *testState

	// inFuzzFn is true when the fuzz function is running. Most F methods cannot
	// be called when inFuzzFn is true.
@@ -244,22 +245,22 @@ func (f *F) Fuzz(ff any) {
	// corpus and entries declared with F.Add.
	//
	// Don't load the seed corpus if this is a worker process; we won't use it.
	if f.fuzzContext.mode != fuzzWorker {
	if f.fstate.mode != fuzzWorker {
		for _, c := range f.corpus {
			if err := f.fuzzContext.deps.CheckCorpus(c.Values, types); err != nil {
			if err := f.fstate.deps.CheckCorpus(c.Values, types); err != nil {
				// TODO(#48302): Report the source location of the F.Add call.
				f.Fatal(err)
			}
		}

		// Load seed corpus
		c, err := f.fuzzContext.deps.ReadCorpus(filepath.Join(corpusDir, f.name), types)
		c, err := f.fstate.deps.ReadCorpus(filepath.Join(corpusDir, f.name), types)
		if err != nil {
			f.Fatal(err)
		}
		for i := range c {
			c[i].IsSeed = true // these are all seed corpus values
			if f.fuzzContext.mode == fuzzCoordinator {
			if f.fstate.mode == fuzzCoordinator {
				// If this is the coordinator process, zero the values, since we don't need
				// to hold onto them.
				c[i].Values = nil
@@ -285,14 +286,16 @@ func (f *F) Fuzz(ff any) {
		if e.Path != "" {
			testName = fmt.Sprintf("%s/%s", testName, filepath.Base(e.Path))
		}
		if f.testContext.isFuzzing {
		if f.tstate.isFuzzing {
			// Don't preserve subtest names while fuzzing. If fn calls T.Run,
			// there will be a very large number of subtests with duplicate names,
			// which will use a large amount of memory. The subtest names aren't
			// useful since there's no way to re-run them deterministically.
			f.testContext.match.clearSubNames()
			f.tstate.match.clearSubNames()
		}

		ctx, cancelCtx := context.WithCancel(f.ctx)

		// Record the stack trace at the point of this call so that if the subtest
		// function - which runs in a separate stack - is marked as a helper, we can
		// continue walking the stack into the parent test.
@@ -300,15 +303,17 @@ func (f *F) Fuzz(ff any) {
		n := runtime.Callers(2, pc[:])
		t := &T{
			common: common{
				barrier: make(chan bool),
				signal:  make(chan bool),
				name:    testName,
				parent:  &f.common,
				level:   f.level + 1,
				creator: pc[:n],
				chatty:  f.chatty,
				barrier: make(chan bool),
				signal:  make(chan bool),
				name:    testName,
				parent:  &f.common,
				level:   f.level + 1,
				creator: pc[:n],
				chatty:  f.chatty,
				ctx:       ctx,
				cancelCtx: cancelCtx,
			},
			context: f.testContext,
			tstate: f.tstate,
		}
		if captureOut != nil {
			// t.parent aliases f.common.
@@ -328,9 +333,9 @@ func (f *F) Fuzz(ff any) {
			// we make sure it is called right before the tRunner function
			// exits, regardless of whether it was executed cleanly, panicked,
			// or if the fuzzFn called t.Fatal.
			if f.testContext.isFuzzing {
				defer f.fuzzContext.deps.SnapshotCoverage()
				f.fuzzContext.deps.ResetCoverage()
			if f.tstate.isFuzzing {
				defer f.fstate.deps.SnapshotCoverage()
				f.fstate.deps.ResetCoverage()
			}
			fn.Call(args)
		})
@@ -342,14 +347,14 @@ func (f *F) Fuzz(ff any) {
		return !t.Failed()
	}

	switch f.fuzzContext.mode {
	switch f.fstate.mode {
	case fuzzCoordinator:
		// Fuzzing is enabled, and this is the test process started by 'go test'.
		// Act as the coordinator process, and coordinate workers to perform the
		// actual fuzzing.
		corpusTargetDir := filepath.Join(corpusDir, f.name)
		cacheTargetDir := filepath.Join(*fuzzCacheDir, f.name)
		err := f.fuzzContext.deps.CoordinateFuzzing(
		err := f.fstate.deps.CoordinateFuzzing(
			fuzzDuration.d,
			int64(fuzzDuration.n),
			minimizeDuration.d,
@@ -376,7 +381,7 @@ func (f *F) Fuzz(ff any) {
	case fuzzWorker:
		// Fuzzing is enabled, and this is a worker process. Follow instructions
		// from the coordinator.
		if err := f.fuzzContext.deps.RunFuzzWorker(func(e corpusEntry) error {
		if err := f.fstate.deps.RunFuzzWorker(func(e corpusEntry) error {
			// Don't write to f.w (which points to Stdout) if running from a
			// fuzz worker. This would become very verbose, particularly during
			// minimization. Return the error instead, and let the caller deal
@@ -398,7 +403,7 @@ func (f *F) Fuzz(ff any) {
		// corpus now.
		for _, e := range f.corpus {
			name := fmt.Sprintf("%s/%s", f.name, filepath.Base(e.Path))
			if _, ok, _ := f.testContext.match.fullName(nil, name); ok {
			if _, ok, _ := f.tstate.match.fullName(nil, name); ok {
				run(f.w, e)
			}
		}
@@ -451,8 +456,8 @@ type fuzzCrashError interface {
	CrashPath() string
}

// fuzzContext holds fields common to all fuzz tests.
type fuzzContext struct {
// fuzzState holds fields common to all fuzz tests.
type fuzzState struct {
	deps testDeps
	mode fuzzMode
}
@@ -486,9 +491,9 @@ func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.T
		break
	}

	tctx := newTestContext(*parallel, m)
	tctx.deadline = deadline
	fctx := &fuzzContext{deps: deps, mode: seedCorpusOnly}
	tstate := newTestState(*parallel, m)
	tstate.deadline = deadline
	fstate := &fuzzState{deps: deps, mode: seedCorpusOnly}
	root := common{w: os.Stdout} // gather output in one place
	if Verbose() {
		root.chatty = newChattyPrinter(root.w)
@@ -497,7 +502,7 @@ func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.T
		if shouldFailFast() {
			break
		}
		testName, matched, _ := tctx.match.fullName(nil, ft.Name)
		testName, matched, _ := tstate.match.fullName(nil, ft.Name)
		if !matched {
			continue
		}
@@ -508,17 +513,20 @@ func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.T
				continue
			}
		}
		ctx, cancelCtx := context.WithCancel(context.Background())
		f := &F{
			common: common{
				signal:  make(chan bool),
				barrier: make(chan bool),
				name:    testName,
				parent:  &root,
				level:   root.level + 1,
				chatty:  root.chatty,
				signal:  make(chan bool),
				barrier: make(chan bool),
				name:    testName,
				parent:  &root,
				level:   root.level + 1,
				chatty:  root.chatty,
				ctx:       ctx,
				cancelCtx: cancelCtx,
			},
			testContext: tctx,
			fuzzContext: fctx,
			tstate: tstate,
			fstate: fstate,
		}
		f.w = indenter{&f.common}
		if f.chatty != nil {
@@ -554,17 +562,17 @@ func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) {
		return true
	}
	m := newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
	tctx := newTestContext(1, m)
	tctx.isFuzzing = true
	fctx := &fuzzContext{
	tstate := newTestState(1, m)
	tstate.isFuzzing = true
	fstate := &fuzzState{
		deps: deps,
	}
	root := common{w: os.Stdout}
	if *isFuzzWorker {
		root.w = io.Discard
		fctx.mode = fuzzWorker
		fstate.mode = fuzzWorker
	} else {
		fctx.mode = fuzzCoordinator
		fstate.mode = fuzzCoordinator
	}
	if Verbose() && !*isFuzzWorker {
		root.chatty = newChattyPrinter(root.w)
@@ -573,7 +581,7 @@ func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) {
	var testName string
	var matched []string
	for i := range fuzzTests {
		name, ok, _ := tctx.match.fullName(nil, fuzzTests[i].Name)
		name, ok, _ := tstate.match.fullName(nil, fuzzTests[i].Name)
		if !ok {
			continue
		}
@@ -590,17 +598,20 @@ func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) {
		return false
	}

	ctx, cancelCtx := context.WithCancel(context.Background())
	f := &F{
		common: common{
			signal:  make(chan bool),
			barrier: nil, // T.Parallel has no effect when fuzzing.
			name:    testName,
			parent:  &root,
			level:   root.level + 1,
			chatty:  root.chatty,
			signal:  make(chan bool),
			barrier: nil, // T.Parallel has no effect when fuzzing.
			name:    testName,
			parent:  &root,
			level:   root.level + 1,
			chatty:  root.chatty,
			ctx:       ctx,
			cancelCtx: cancelCtx,
		},
		fuzzContext: fctx,
		testContext: tctx,
		fstate: fstate,
		tstate: tstate,
	}
	f.w = indenter{&f.common}
	if f.chatty != nil {
@@ -694,7 +705,7 @@ func fRunner(f *F, fn func(*F)) {
			// This only affects fuzz tests run as normal tests.
			// While fuzzing, T.Parallel has no effect, so f.sub is empty, and this
			// branch is not taken. f.barrier is nil in that case.
			f.testContext.release()
			f.tstate.release()
			close(f.barrier)
			// Wait for the subtests to complete.
			for _, sub := range f.sub {

@@ -23,15 +23,9 @@ func TestTBHelper(t *testing.T) {
		return
	}

	testenv.MustHaveExec(t)
	t.Parallel()

	exe, err := os.Executable()
	if err != nil {
		t.Fatal(err)
	}

	cmd := testenv.Command(t, exe, "-test.run=^TestTBHelper$")
	cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestTBHelper$")
	cmd = testenv.CleanCmdEnv(cmd)
	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
	out, _ := cmd.CombinedOutput()
@@ -66,15 +60,9 @@ func TestTBHelperParallel(t *testing.T) {
		return
	}

	testenv.MustHaveExec(t)
	t.Parallel()

	exe, err := os.Executable()
	if err != nil {
		t.Fatal(err)
	}

	cmd := testenv.Command(t, exe, "-test.run=^TestTBHelperParallel$")
	cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^TestTBHelperParallel$")
	cmd = testenv.CleanCmdEnv(cmd)
	cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
	out, _ := cmd.CombinedOutput()

src/testing/loop_test.go (new file, 57 lines)
@@ -0,0 +1,57 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testing

func TestBenchmarkBLoop(t *T) {
	var initialStart highPrecisionTime
	var firstStart highPrecisionTime
	var lastStart highPrecisionTime
	var runningEnd bool
	runs := 0
	iters := 0
	finalBN := 0
	bRet := Benchmark(func(b *B) {
		initialStart = b.start
		runs++
		for b.Loop() {
			if iters == 0 {
				firstStart = b.start
			}
			lastStart = b.start
			iters++
		}
		finalBN = b.N
		runningEnd = b.timerOn
	})
	// Verify that a b.Loop benchmark is invoked just once.
	if runs != 1 {
		t.Errorf("want runs == 1, got %d", runs)
	}
	// Verify that at least one iteration ran.
	if iters == 0 {
		t.Fatalf("no iterations ran")
	}
	// Verify that b.N, bRet.N, and the b.Loop() iteration count match.
	if finalBN != iters || bRet.N != iters {
		t.Errorf("benchmark iterations mismatch: %d loop iterations, final b.N=%d, bRet.N=%d", iters, finalBN, bRet.N)
	}
	// Make sure the benchmark ran for an appropriate amount of time.
	if bRet.T < benchTime.d {
		t.Fatalf("benchmark ran for %s, want >= %s", bRet.T, benchTime.d)
	}
	// Verify that the timer is reset on the first loop, and then left alone.
	if firstStart == initialStart {
		t.Errorf("b.Loop did not reset the timer")
	}
	if lastStart != firstStart {
		t.Errorf("timer was reset during iteration")
	}
	// Verify that it stopped the timer after the last loop.
	if runningEnd {
		t.Errorf("timer was still running after last iteration")
	}
}

// See also TestBenchmarkBLoop* in other files.
@@ -265,7 +265,7 @@ func TestHandler(h slog.Handler, results func() []map[string]any) error {
	if g, w := len(res), len(cases); g != w {
		return fmt.Errorf("got %d results, want %d", g, w)
	}
	for i, got := range results() {
	for i, got := range res {
		c := cases[i]
		for _, check := range c.checks {
			if problem := check(got); problem != "" {

@@ -7,9 +7,9 @@ package testing

import (
	"bytes"
	"fmt"
	"reflect"
	"regexp"
	"runtime"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
@@ -21,12 +21,11 @@ func init() {
	benchTime.d = 100 * time.Millisecond
}

func TestTestContext(t *T) {
func TestTestState(t *T) {
	const (
		add1 = 0
		done = 1
	)
	// After each of the calls are applied to the context, the
	type call struct {
		typ int // run or done
		// result from applying the call
@@ -72,7 +71,7 @@ func TestTestContext(t *T) {
		},
	}}
	for i, tc := range testCases {
		ctx := &testContext{
		tstate := &testState{
			startParallel: make(chan bool),
			maxParallel:   tc.max,
		}
@@ -88,18 +87,18 @@ func TestTestContext(t *T) {
			started := false
			switch call.typ {
			case add1:
				signal := doCall(ctx.waitParallel)
				signal := doCall(tstate.waitParallel)
				select {
				case <-signal:
					started = true
				case ctx.startParallel <- true:
				case tstate.startParallel <- true:
					<-signal
				}
			case done:
				signal := doCall(ctx.release)
				signal := doCall(tstate.release)
				select {
				case <-signal:
				case <-ctx.startParallel:
				case <-tstate.startParallel:
					started = true
					<-signal
				}
@@ -107,11 +106,11 @@ func TestTestContext(t *T) {
			if started != call.started {
				t.Errorf("%d:%d:started: got %v; want %v", i, j, started, call.started)
			}
			if ctx.running != call.running {
				t.Errorf("%d:%d:running: got %v; want %v", i, j, ctx.running, call.running)
			if tstate.running != call.running {
				t.Errorf("%d:%d:running: got %v; want %v", i, j, tstate.running, call.running)
			}
			if ctx.numWaiting != call.waiting {
				t.Errorf("%d:%d:waiting: got %v; want %v", i, j, ctx.numWaiting, call.waiting)
			if tstate.numWaiting != call.waiting {
				t.Errorf("%d:%d:waiting: got %v; want %v", i, j, tstate.numWaiting, call.waiting)
			}
		}
	}
@@ -507,7 +506,7 @@ func TestTRun(t *T) {
	}}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *T) {
			ctx := newTestContext(tc.maxPar, allMatcher())
			tstate := newTestState(tc.maxPar, allMatcher())
			buf := &strings.Builder{}
			root := &T{
				common: common{
@@ -516,14 +515,14 @@ func TestTRun(t *T) {
					name:   "",
					w:      buf,
				},
				context: ctx,
				tstate: tstate,
			}
			if tc.chatty {
				root.chatty = newChattyPrinter(root.w)
				root.chatty.json = tc.json
			}
			ok := root.Run(tc.desc, tc.f)
			ctx.release()
			tstate.release()

			if ok != tc.ok {
				t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok)
@@ -531,8 +530,8 @@ func TestTRun(t *T) {
			if ok != !root.Failed() {
				t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
			}
			if ctx.running != 0 || ctx.numWaiting != 0 {
				t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting)
			if tstate.running != 0 || tstate.numWaiting != 0 {
				t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, tstate.running, tstate.numWaiting)
			}
			got := strings.TrimSpace(buf.String())
			want := strings.TrimSpace(tc.output)
@@ -790,8 +789,8 @@ func TestRacyOutput(t *T) {
	}

	root := &T{
		common:  common{w: &funcWriter{raceDetector}},
		context: newTestContext(1, allMatcher()),
		common: common{w: &funcWriter{raceDetector}},
		tstate: newTestState(1, allMatcher()),
	}
	root.chatty = newChattyPrinter(root.w)
	root.Run("", func(t *T) {
@@ -815,7 +814,7 @@ func TestRacyOutput(t *T) {

// The late log message did not include the test name. Issue 29388.
func TestLogAfterComplete(t *T) {
	ctx := newTestContext(1, allMatcher())
	tstate := newTestState(1, allMatcher())
	var buf bytes.Buffer
	t1 := &T{
		common: common{
@@ -824,7 +823,7 @@ func TestLogAfterComplete(t *T) {
			signal: make(chan bool, 1),
			w:      &buf,
		},
		context: ctx,
		tstate: tstate,
	}

	c1 := make(chan bool)
@@ -886,7 +885,7 @@ func TestCleanup(t *T) {
		t.Cleanup(func() { cleanups = append(cleanups, 1) })
		t.Cleanup(func() { cleanups = append(cleanups, 2) })
	})
	if got, want := cleanups, []int{2, 1}; !reflect.DeepEqual(got, want) {
	if got, want := cleanups, []int{2, 1}; !slices.Equal(got, want) {
		t.Errorf("unexpected cleanup record; got %v want %v", got, want)
	}
}

src/testing/synctest/context_example_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.synctest

package synctest_test

import (
	"context"
	"fmt"
	"testing/synctest"
	"time"
)

// This example demonstrates testing the context.AfterFunc function.
//
// AfterFunc registers a function to execute in a new goroutine
// after a context is canceled.
//
// The test verifies that the function is not run before the context is canceled,
// and is run after the context is canceled.
func Example_contextAfterFunc() {
	synctest.Run(func() {
		// Create a context.Context which can be canceled.
		ctx, cancel := context.WithCancel(context.Background())

		// context.AfterFunc registers a function to be called
		// when a context is canceled.
		afterFuncCalled := false
		context.AfterFunc(ctx, func() {
			afterFuncCalled = true
		})

		// The context has not been canceled, so the AfterFunc is not called.
		synctest.Wait()
		fmt.Printf("before context is canceled: afterFuncCalled=%v\n", afterFuncCalled)

		// Cancel the context and wait for the AfterFunc to finish executing.
		// Verify that the AfterFunc ran.
		cancel()
		synctest.Wait()
		fmt.Printf("after context is canceled: afterFuncCalled=%v\n", afterFuncCalled)

		// Output:
		// before context is canceled: afterFuncCalled=false
		// after context is canceled: afterFuncCalled=true
	})
}

// This example demonstrates testing the context.WithTimeout function.
//
// WithTimeout creates a context which is canceled after a timeout.
//
// The test verifies that the context is not canceled before the timeout expires,
// and is canceled after the timeout expires.
func Example_contextWithTimeout() {
	synctest.Run(func() {
		// Create a context.Context which is canceled after a timeout.
		const timeout = 5 * time.Second
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		defer cancel()

		// Wait just less than the timeout.
		time.Sleep(timeout - time.Nanosecond)
		synctest.Wait()
		fmt.Printf("before timeout: ctx.Err() = %v\n", ctx.Err())

		// Wait the rest of the way until the timeout.
		time.Sleep(time.Nanosecond)
		synctest.Wait()
		fmt.Printf("after timeout: ctx.Err() = %v\n", ctx.Err())

		// Output:
		// before timeout: ctx.Err() = <nil>
		// after timeout: ctx.Err() = context deadline exceeded
	})
}
src/testing/synctest/synctest.go (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.synctest

// Package synctest provides support for testing concurrent code.
//
// This package only exists when using Go compiled with GOEXPERIMENT=synctest.
// It is experimental, and not subject to the Go 1 compatibility promise.
package synctest

import (
	"internal/synctest"
)

// Run executes f in a new goroutine.
//
// The new goroutine and any goroutines transitively started by it form
// an isolated "bubble".
// Run waits for all goroutines in the bubble to exit before returning.
//
// Goroutines in the bubble use a synthetic time implementation.
// The initial time is midnight UTC 2000-01-01.
//
// Time advances when every goroutine in the bubble is blocked.
// For example, a call to time.Sleep will block until all other
// goroutines are blocked and return after the bubble's clock has
// advanced. See [Wait] for the specific definition of blocked.
//
// If every goroutine is blocked and there are no timers scheduled,
// Run panics.
//
// Channels, time.Timers, and time.Tickers created within the bubble
// are associated with it. Operating on a bubbled channel, timer, or ticker
// from outside the bubble panics.
func Run(f func()) {
	synctest.Run(f)
}

// Wait blocks until every goroutine within the current bubble,
// other than the current goroutine, is durably blocked.
// It panics if called from a non-bubbled goroutine,
// or if two goroutines in the same bubble call Wait at the same time.
//
// A goroutine is durably blocked if it can only be unblocked by another
// goroutine in its bubble. The following operations durably block
// a goroutine:
//   - a send or receive on a channel from within the bubble
//   - a select statement where every case is a channel within the bubble
//   - sync.Cond.Wait
//   - time.Sleep
//
// A goroutine executing a system call or waiting for an external event
// such as a network operation is not durably blocked.
// For example, a goroutine blocked reading from a network connection
// is not durably blocked even if no data is currently available on the
// connection, because it may be unblocked by data written from outside
// the bubble or may be in the process of receiving data from a kernel
// network buffer.
//
// A goroutine is not durably blocked when blocked on a send or receive
// on a channel that was not created within its bubble, because it may
// be unblocked by a channel receive or send from outside its bubble.
func Wait() {
	synctest.Wait()
}
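
A minimal sketch of Run and Wait together (assuming GOEXPERIMENT=synctest is enabled; the example name is illustrative): inside the bubble, sleeping two seconds of fake time costs no real time, and Wait makes the helper goroutine's side effect observable deterministically.

	//go:build goexperiment.synctest

	package synctest_test

	import (
		"fmt"
		"testing/synctest"
		"time"
	)

	func Example_fakeTime() {
		synctest.Run(func() {
			start := time.Now() // midnight UTC 2000-01-01 inside the bubble
			done := false
			go func() {
				time.Sleep(time.Second)
				done = true
			}()
			time.Sleep(2 * time.Second) // fake time advances; no real delay
			synctest.Wait()             // the helper goroutine has finished
			fmt.Println(time.Since(start), done) // prints "2s true"
		})
	}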
@@ -72,27 +72,24 @@
// A sample benchmark function looks like this:
//
//	func BenchmarkRandInt(b *testing.B) {
//		for range b.N {
//		for b.Loop() {
//			rand.Int()
//		}
//	}
//
// The benchmark function must run the target code b.N times.
// It is called multiple times with b.N adjusted until the
// benchmark function lasts long enough to be timed reliably.
// The output
//
//	BenchmarkRandInt-8   68453040   17.8 ns/op
//
// means that the loop ran 68453040 times at a speed of 17.8 ns per loop.
// means that the body of the loop ran 68453040 times at a speed of 17.8 ns per loop.
//
// If a benchmark needs some expensive setup before running, the timer
// may be reset:
// Only the body of the loop is timed, so benchmarks may do expensive
// setup before calling b.Loop, which will not be counted toward the
// benchmark measurement:
//
//	func BenchmarkBigLen(b *testing.B) {
//		big := NewBig()
//		b.ResetTimer()
//		for range b.N {
//		for b.Loop() {
//			big.Len()
//		}
//	}
@@ -120,6 +117,37 @@
// In particular, https://golang.org/x/perf/cmd/benchstat performs
// statistically robust A/B comparisons.
//
// # b.N-style benchmarks
//
// Prior to the introduction of [B.Loop], benchmarks were written in a
// different style using B.N. For example:
//
//	func BenchmarkRandInt(b *testing.B) {
//		for range b.N {
//			rand.Int()
//		}
//	}
//
// In this style of benchmark, the benchmark function must run
// the target code b.N times. The benchmark function is called
// multiple times with b.N adjusted until the benchmark function
// lasts long enough to be timed reliably. This also means any setup
// done before the loop may be run several times.
//
// If a benchmark needs some expensive setup before running, the timer
// should be explicitly reset:
//
//	func BenchmarkBigLen(b *testing.B) {
//		big := NewBig()
//		b.ResetTimer()
//		for range b.N {
//			big.Len()
//		}
//	}
//
// New benchmarks should prefer using [B.Loop], which is more robust
// and more efficient.
//
// # Examples
//
// The package also runs and verifies example code. Example functions may
@@ -371,6 +399,7 @@ package testing

import (
	"bytes"
	"context"
	"errors"
	"flag"
	"fmt"
@@ -379,6 +408,7 @@ import (
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"runtime/debug"
@@ -632,6 +662,9 @@ type common struct {
	tempDir    string
	tempDirErr error
	tempDirSeq int32

	ctx       context.Context
	cancelCtx context.CancelFunc
}

// Short reports whether the -test.short flag is set.
@@ -891,11 +924,13 @@ type TB interface {
	Logf(format string, args ...any)
	Name() string
	Setenv(key, value string)
	Chdir(dir string)
	Skip(args ...any)
	SkipNow()
	Skipf(format string, args ...any)
	Skipped() bool
	TempDir() string
	Context() context.Context

	// A private method to prevent users implementing the
	// interface and so future additions to it will not
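
Because Chdir and Context are now part of TB, a helper can be written once and used from tests, benchmarks, and fuzz functions alike. A sketch (waitForServer and its retry policy are hypothetical, not part of this change):

	import (
		"net"
		"testing"
		"time"
	)

	// waitForServer polls addr until it accepts connections or the
	// test's context is canceled (just before Cleanup functions run).
	func waitForServer(tb testing.TB, addr string) {
		tb.Helper()
		ctx := tb.Context()
		for ctx.Err() == nil {
			conn, err := net.DialTimeout("tcp", addr, 100*time.Millisecond)
			if err == nil {
				conn.Close()
				return
			}
			time.Sleep(50 * time.Millisecond)
		}
		tb.Fatalf("server %s never became reachable", addr)
	}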
@@ -917,8 +952,8 @@ var _ TB = (*B)(nil)
// may be called simultaneously from multiple goroutines.
type T struct {
	common
	isEnvSet bool
	context *testContext // For running tests and subtests.
	denyParallel bool
	tstate *testState // For running tests and subtests.
}

func (c *common) private() {}
@@ -1307,6 +1342,58 @@ func (c *common) Setenv(key, value string) {
	}
}

// Chdir calls os.Chdir(dir) and uses Cleanup to restore the current
// working directory to its original value after the test. On Unix, it
// also sets PWD environment variable for the duration of the test.
//
// Because Chdir affects the whole process, it cannot be used
// in parallel tests or tests with parallel ancestors.
func (c *common) Chdir(dir string) {
	c.checkFuzzFn("Chdir")
	oldwd, err := os.Open(".")
	if err != nil {
		c.Fatal(err)
	}
	if err := os.Chdir(dir); err != nil {
		c.Fatal(err)
	}
	// On POSIX platforms, PWD represents “an absolute pathname of the
	// current working directory.” Since we are changing the working
	// directory, we should also set or update PWD to reflect that.
	switch runtime.GOOS {
	case "windows", "plan9":
		// Windows and Plan 9 do not use the PWD variable.
	default:
		if !filepath.IsAbs(dir) {
			dir, err = os.Getwd()
			if err != nil {
				c.Fatal(err)
			}
		}
		c.Setenv("PWD", dir)
	}
	c.Cleanup(func() {
		err := oldwd.Chdir()
		oldwd.Close()
		if err != nil {
			// It's not safe to continue with tests if we can't
			// get back to the original working directory. Since
			// we are holding a dirfd, this is highly unlikely.
			panic("testing.Chdir: " + err.Error())
		}
	})
}
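
Typical use, as a sketch (the test name and file are illustrative): pair Chdir with TempDir so code that writes relative paths runs in a scratch location, with the original directory restored by the Cleanup registered above.

	func TestWritesToCwd(t *testing.T) {
		t.Chdir(t.TempDir()) // run the test in a fresh scratch directory

		// Code under test that writes relative paths now lands in the
		// temporary directory; the original working directory (and PWD
		// on Unix) is restored automatically when the test ends.
		if err := os.WriteFile("out.txt", []byte("data"), 0o644); err != nil {
			t.Fatal(err)
		}
	}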

// Context returns a context that is canceled just before
// Cleanup-registered functions are called.
//
// Cleanup functions can wait for any resources
// that shut down on Context.Done before the test or benchmark completes.
func (c *common) Context() context.Context {
	c.checkFuzzFn("Context")
	return c.ctx
}
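
A sketch of the intended pattern (the worker type and startWorker are hypothetical): resources tied to the test's context begin shutting down before Cleanup functions run, so a Cleanup can wait for them.

	import (
		"context"
		"testing"
	)

	type worker struct{ done chan struct{} }

	func (w *worker) Done() <-chan struct{} { return w.done }

	// startWorker runs until ctx is canceled, then closes its done channel.
	func startWorker(ctx context.Context) *worker {
		w := &worker{done: make(chan struct{})}
		go func() {
			<-ctx.Done()
			close(w.done)
		}()
		return w
	}

	func TestWorker(t *testing.T) {
		w := startWorker(t.Context())
		t.Cleanup(func() {
			// t.Context() is canceled just before Cleanup runs, so the
			// worker is already shutting down; wait for it to finish.
			<-w.Done()
		})
		// ... exercise the worker ...
	}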

// panicHandling controls the panic handling used by runCleanup.
type panicHandling int

@@ -1339,6 +1426,10 @@ func (c *common) runCleanup(ph panicHandling) (panicVal any) {
		}
	}()

	if c.cancelCtx != nil {
		c.cancelCtx()
	}

	for {
		var cleanup func()
		c.mu.Lock()
@@ -1436,6 +1527,8 @@ func pcToName(pc uintptr) string {
	return frame.Function
}

const parallelConflict = `testing: test using t.Setenv or t.Chdir can not use t.Parallel`

// Parallel signals that this test is to be run in parallel with (and only with)
// other parallel tests. When a test is run multiple times due to use of
// -test.count or -test.cpu, multiple instances of a single test never run in
@@ -1444,8 +1537,8 @@ func (t *T) Parallel() {
	if t.isParallel {
		panic("testing: t.Parallel called multiple times")
	}
	if t.isEnvSet {
		panic("testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests")
	if t.denyParallel {
		panic(parallelConflict)
	}
	t.isParallel = true
	if t.parent.barrier == nil {
@@ -1482,7 +1575,7 @@ func (t *T) Parallel() {

	t.signal <- true   // Release calling test.
	<-t.parent.barrier // Wait for the parent test to complete.
	t.context.waitParallel()
	t.tstate.waitParallel()

	if t.chatty != nil {
		t.chatty.Updatef(t.name, "=== CONT %s\n", t.name)
@@ -1500,6 +1593,21 @@ func (t *T) Parallel() {
	t.lastRaceErrors.Store(int64(race.Errors()))
}

func (t *T) checkParallel() {
	// Non-parallel subtests that have parallel ancestors may still
	// run in parallel with other tests: they are only non-parallel
	// with respect to the other subtests of the same parent.
	// Since calls like SetEnv or Chdir affect the whole process, we need
	// to deny those if the current test or any parent is parallel.
	for c := &t.common; c != nil; c = c.parent {
		if c.isParallel {
			panic(parallelConflict)
		}
	}

	t.denyParallel = true
}
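
The effect, sketched: either ordering of the two calls now fails with the same message, since Setenv and Chdir both route through checkParallel.

	func TestConflict(t *testing.T) {
		t.Setenv("MODE", "test") // marks the test as deny-parallel
		t.Parallel()             // panics with the parallelConflict message
	}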
|
||||
// Setenv calls os.Setenv(key, value) and uses Cleanup to
|
||||
// restore the environment variable to its original value
|
||||
// after the test.
|
||||
@@ -1507,27 +1615,21 @@ func (t *T) Parallel() {
|
||||
// Because Setenv affects the whole process, it cannot be used
|
||||
// in parallel tests or tests with parallel ancestors.
|
||||
func (t *T) Setenv(key, value string) {
|
||||
// Non-parallel subtests that have parallel ancestors may still
|
||||
// run in parallel with other tests: they are only non-parallel
|
||||
// with respect to the other subtests of the same parent.
|
||||
// Since SetEnv affects the whole process, we need to disallow it
|
||||
// if the current test or any parent is parallel.
|
||||
isParallel := false
|
||||
for c := &t.common; c != nil; c = c.parent {
|
||||
if c.isParallel {
|
||||
isParallel = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isParallel {
|
||||
panic("testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests")
|
||||
}
|
||||
|
||||
t.isEnvSet = true
|
||||
|
||||
t.checkParallel()
|
||||
t.common.Setenv(key, value)
|
||||
}
|
||||
|
||||
// Chdir calls os.Chdir(dir) and uses Cleanup to restore the current
|
||||
// working directory to its original value after the test. On Unix, it
|
||||
// also sets PWD environment variable for the duration of the test.
|
||||
//
|
||||
// Because Chdir affects the whole process, it cannot be used
|
||||
// in parallel tests or tests with parallel ancestors.
|
||||
func (t *T) Chdir(dir string) {
|
||||
t.checkParallel()
|
||||
t.common.Chdir(dir)
|
||||
}
|
||||
|
||||
// InternalTest is an internal type but exported because it is cross-package;
|
||||
// it is part of the implementation of the "go test" command.
|
||||
type InternalTest struct {
|
||||
@@ -1583,7 +1685,7 @@ func tRunner(t *T, fn func(t *T)) {
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil && t.context.isFuzzing {
|
||||
if err != nil && t.tstate.isFuzzing {
|
||||
prefix := "panic: "
|
||||
if err == errNilPanicOrGoexit {
|
||||
prefix = ""
|
||||
@@ -1641,7 +1743,7 @@ func tRunner(t *T, fn func(t *T)) {
|
||||
// Run parallel subtests.
|
||||
|
||||
// Decrease the running count for this test and mark it as no longer running.
|
||||
t.context.release()
|
||||
t.tstate.release()
|
||||
running.Delete(t.name)
|
||||
|
||||
// Release the parallel subtests.
|
||||
@@ -1663,12 +1765,12 @@ func tRunner(t *T, fn func(t *T)) {
|
||||
t.checkRaces()
|
||||
if !t.isParallel {
|
||||
// Reacquire the count for sequential tests. See comment in Run.
|
||||
t.context.waitParallel()
|
||||
t.tstate.waitParallel()
|
||||
}
|
||||
} else if t.isParallel {
|
||||
// Only release the count for this test if it was run as a parallel
|
||||
// test. See comment in Run method.
|
||||
t.context.release()
|
||||
t.tstate.release()
|
||||
}
|
||||
t.report() // Report after all subtests have finished.
|
||||
|
||||
@@ -1707,7 +1809,7 @@ func (t *T) Run(name string, f func(t *T)) bool {
|
||||
}
|
||||
|
||||
t.hasSub.Store(true)
|
||||
testName, ok, _ := t.context.match.fullName(&t.common, name)
|
||||
testName, ok, _ := t.tstate.match.fullName(&t.common, name)
|
||||
if !ok || shouldFailFast() {
|
||||
return true
|
||||
}
|
||||
@@ -1716,17 +1818,23 @@ func (t *T) Run(name string, f func(t *T)) bool {
|
||||
// continue walking the stack into the parent test.
|
||||
var pc [maxStackLen]uintptr
|
||||
n := runtime.Callers(2, pc[:])
|
||||
|
||||
// There's no reason to inherit this context from parent. The user's code can't observe
|
||||
// the difference between the background context and the one from the parent test.
|
||||
    ctx, cancelCtx := context.WithCancel(context.Background())
    t = &T{
        common: common{
            barrier: make(chan bool),
            signal:  make(chan bool, 1),
            name:    testName,
            parent:  &t.common,
            level:   t.level + 1,
            creator: pc[:n],
            chatty:  t.chatty,
            barrier:   make(chan bool),
            signal:    make(chan bool, 1),
            name:      testName,
            parent:    &t.common,
            level:     t.level + 1,
            creator:   pc[:n],
            chatty:    t.chatty,
            ctx:       ctx,
            cancelCtx: cancelCtx,
        },
        context: t.context,
        tstate:  t.tstate,
    }
    t.w = indenter{&t.common}

@@ -1765,17 +1873,17 @@ func (t *T) Run(name string, f func(t *T)) bool {
//
// The ok result is false if the -timeout flag indicates “no timeout” (0).
func (t *T) Deadline() (deadline time.Time, ok bool) {
    deadline = t.context.deadline
    deadline = t.tstate.deadline
    return deadline, !deadline.IsZero()
}
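As a hedged sketch of how callers typically consume Deadline (not part of this diff; the grace period is an arbitrary choice): derive a context that expires a little before the -timeout deadline so the test still has time to clean up and report.

func TestSlowQuery(t *testing.T) {
    ctx := context.Background()
    if deadline, ok := t.Deadline(); ok {
        var cancel context.CancelFunc
        // Stop work 10 seconds early (arbitrary) so cleanup can still run.
        ctx, cancel = context.WithDeadline(ctx, deadline.Add(-10*time.Second))
        defer cancel()
    }
    _ = ctx // pass ctx to the code under test
}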

// testContext holds all fields that are common to all tests. This includes
// testState holds all fields that are common to all tests. This includes
// synchronization primitives to run at most *parallel tests.
type testContext struct {
type testState struct {
    match    *matcher
    deadline time.Time

    // isFuzzing is true in the context used when generating random inputs
    // isFuzzing is true in the state used when generating random inputs
    // for fuzz targets. isFuzzing is false when running normal tests and
    // when running fuzz tests as unit tests (without -fuzz or when -fuzz
    // does not match).
@@ -1797,8 +1905,8 @@ type testContext struct {
    maxParallel int
}

func newTestContext(maxParallel int, m *matcher) *testContext {
    return &testContext{
func newTestState(maxParallel int, m *matcher) *testState {
    return &testState{
        match:         m,
        startParallel: make(chan bool),
        maxParallel:   maxParallel,
@@ -1806,28 +1914,28 @@ func newTestContext(maxParallel int, m *matcher) *testContext {
    }
}

func (c *testContext) waitParallel() {
    c.mu.Lock()
    if c.running < c.maxParallel {
        c.running++
        c.mu.Unlock()
func (s *testState) waitParallel() {
    s.mu.Lock()
    if s.running < s.maxParallel {
        s.running++
        s.mu.Unlock()
        return
    }
    c.numWaiting++
    c.mu.Unlock()
    <-c.startParallel
    s.numWaiting++
    s.mu.Unlock()
    <-s.startParallel
}

func (c *testContext) release() {
    c.mu.Lock()
    if c.numWaiting == 0 {
        c.running--
        c.mu.Unlock()
func (s *testState) release() {
    s.mu.Lock()
    if s.numWaiting == 0 {
        s.running--
        s.mu.Unlock()
        return
    }
    c.numWaiting--
    c.mu.Unlock()
    c.startParallel <- true // Pick a waiting test to be run.
    s.numWaiting--
    s.mu.Unlock()
    s.startParallel <- true // Pick a waiting test to be run.
}
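waitParallel and release together implement a counting semaphore with direct handoff: when a test finishes while another is blocked, the slot is passed to the waiter over startParallel instead of being returned to the pool, so the running count never dips below the true number of active tests. A self-contained sketch of the same pattern (names are ours, not part of the testing package):

package handoff

import "sync"

// sem mirrors the waitParallel/release pattern above: a freed slot is
// handed directly to one blocked acquirer over the start channel.
type sem struct {
    mu      sync.Mutex
    running int // slots currently held
    waiting int // goroutines blocked in acquire
    max     int // capacity (cf. maxParallel)
    start   chan struct{}
}

func newSem(max int) *sem {
    return &sem{max: max, start: make(chan struct{})}
}

func (s *sem) acquire() {
    s.mu.Lock()
    if s.running < s.max {
        s.running++
        s.mu.Unlock()
        return
    }
    s.waiting++
    s.mu.Unlock()
    <-s.start // block until release hands over a slot; running is unchanged
}

func (s *sem) release() {
    s.mu.Lock()
    if s.waiting == 0 {
        s.running--
        s.mu.Unlock()
        return
    }
    s.waiting--
    s.mu.Unlock()
    s.start <- struct{}{} // wake exactly one waiter; the slot transfers directly
}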

// No one should be using func Main anymore.
@@ -2150,15 +2258,18 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT
                // to keep trying.
                break
            }
            ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run", *skip))
            ctx.deadline = deadline
            ctx, cancelCtx := context.WithCancel(context.Background())
            tstate := newTestState(*parallel, newMatcher(matchString, *match, "-test.run", *skip))
            tstate.deadline = deadline
            t := &T{
                common: common{
                    signal:  make(chan bool, 1),
                    barrier: make(chan bool),
                    w:       os.Stdout,
                    signal:    make(chan bool, 1),
                    barrier:   make(chan bool),
                    w:         os.Stdout,
                    ctx:       ctx,
                    cancelCtx: cancelCtx,
                },
                context: ctx,
                tstate:  tstate,
            }
            if Verbose() {
                t.chatty = newChattyPrinter(t.w)

@@ -6,6 +6,8 @@ package testing_test

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "internal/race"
    "internal/testenv"
@@ -13,6 +15,7 @@ import (
    "os/exec"
    "path/filepath"
    "regexp"
    "runtime"
    "slices"
    "strings"
    "sync"
@@ -200,64 +203,177 @@ func TestSetenv(t *testing.T) {
    }
}

func TestSetenvWithParallelAfterSetenv(t *testing.T) {
    defer func() {
        want := "testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests"
        if got := recover(); got != want {
            t.Fatalf("expected panic; got %#v want %q", got, want)
        }
    }()
func expectParallelConflict(t *testing.T) {
    want := testing.ParallelConflict
    if got := recover(); got != want {
        t.Fatalf("expected panic; got %#v want %q", got, want)
    }
}

    t.Setenv("GO_TEST_KEY_1", "value")
func testWithParallelAfter(t *testing.T, fn func(*testing.T)) {
    defer expectParallelConflict(t)

    fn(t)
    t.Parallel()
}

func TestSetenvWithParallelBeforeSetenv(t *testing.T) {
    defer func() {
        want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
        if got := recover(); got != want {
            t.Fatalf("expected panic; got %#v want %q", got, want)
        }
    }()
func testWithParallelBefore(t *testing.T, fn func(*testing.T)) {
    defer expectParallelConflict(t)

    t.Parallel()

    t.Setenv("GO_TEST_KEY_1", "value")
    fn(t)
}

func TestSetenvWithParallelParentBeforeSetenv(t *testing.T) {
func testWithParallelParentBefore(t *testing.T, fn func(*testing.T)) {
    t.Parallel()

    t.Run("child", func(t *testing.T) {
        defer func() {
            want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
            if got := recover(); got != want {
                t.Fatalf("expected panic; got %#v want %q", got, want)
            }
        }()
        defer expectParallelConflict(t)

        t.Setenv("GO_TEST_KEY_1", "value")
        fn(t)
    })
}

func TestSetenvWithParallelGrandParentBeforeSetenv(t *testing.T) {
func testWithParallelGrandParentBefore(t *testing.T, fn func(*testing.T)) {
    t.Parallel()

    t.Run("child", func(t *testing.T) {
        t.Run("grand-child", func(t *testing.T) {
            defer func() {
                want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
                if got := recover(); got != want {
                    t.Fatalf("expected panic; got %#v want %q", got, want)
                }
            }()
            defer expectParallelConflict(t)

            t.Setenv("GO_TEST_KEY_1", "value")
            fn(t)
        })
    })
}

func tSetenv(t *testing.T) {
    t.Setenv("GO_TEST_KEY_1", "value")
}

func TestSetenvWithParallelAfter(t *testing.T) {
    testWithParallelAfter(t, tSetenv)
}

func TestSetenvWithParallelBefore(t *testing.T) {
    testWithParallelBefore(t, tSetenv)
}

func TestSetenvWithParallelParentBefore(t *testing.T) {
    testWithParallelParentBefore(t, tSetenv)
}

func TestSetenvWithParallelGrandParentBefore(t *testing.T) {
    testWithParallelGrandParentBefore(t, tSetenv)
}

func tChdir(t *testing.T) {
    t.Chdir(t.TempDir())
}

func TestChdirWithParallelAfter(t *testing.T) {
    testWithParallelAfter(t, tChdir)
}

func TestChdirWithParallelBefore(t *testing.T) {
    testWithParallelBefore(t, tChdir)
}

func TestChdirWithParallelParentBefore(t *testing.T) {
    testWithParallelParentBefore(t, tChdir)
}

func TestChdirWithParallelGrandParentBefore(t *testing.T) {
    testWithParallelGrandParentBefore(t, tChdir)
}

func TestChdir(t *testing.T) {
    oldDir, err := os.Getwd()
    if err != nil {
        t.Fatal(err)
    }
    defer os.Chdir(oldDir)

    // The "relative" test case relies on tmp not being a symlink.
    tmp, err := filepath.EvalSymlinks(t.TempDir())
    if err != nil {
        t.Fatal(err)
    }
    rel, err := filepath.Rel(oldDir, tmp)
    if err != nil {
        // If GOROOT is on the C: volume and tmp is on the D: volume, there
        // is no relative path between them, so skip that test case.
        rel = "skip"
    }

    for _, tc := range []struct {
        name, dir, pwd string
        extraChdir     bool
    }{
        {
            name: "absolute",
            dir:  tmp,
            pwd:  tmp,
        },
        {
            name: "relative",
            dir:  rel,
            pwd:  tmp,
        },
        {
            name: "current (absolute)",
            dir:  oldDir,
            pwd:  oldDir,
        },
        {
            name: "current (relative) with extra os.Chdir",
            dir:  ".",
            pwd:  oldDir,

            extraChdir: true,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            if tc.dir == "skip" {
                t.Skipf("skipping test because there is no relative path between %s and %s", oldDir, tmp)
            }
            if !filepath.IsAbs(tc.pwd) {
                t.Fatalf("Bad tc.pwd: %q (must be absolute)", tc.pwd)
            }

            t.Chdir(tc.dir)

            newDir, err := os.Getwd()
            if err != nil {
                t.Fatal(err)
            }
            if newDir != tc.pwd {
                t.Fatalf("failed to chdir to %q: getwd: got %q, want %q", tc.dir, newDir, tc.pwd)
            }

            switch runtime.GOOS {
            case "windows", "plan9":
                // Windows and Plan 9 do not use the PWD variable.
            default:
                if pwd := os.Getenv("PWD"); pwd != tc.pwd {
                    t.Fatalf("PWD: got %q, want %q", pwd, tc.pwd)
                }
            }

            if tc.extraChdir {
                os.Chdir("..")
            }
        })

        newDir, err := os.Getwd()
        if err != nil {
            t.Fatal(err)
        }
        if newDir != oldDir {
            t.Fatalf("failed to restore wd to %s: getwd: %s", oldDir, newDir)
        }
    }
}

// testingTrueInInit is part of TestTesting.
var testingTrueInInit = false

@@ -324,12 +440,7 @@ func runTest(t *testing.T, test string) []byte {

    testenv.MustHaveExec(t)

    exe, err := os.Executable()
    if err != nil {
        t.Skipf("can't find test executable: %v", err)
    }

    cmd := testenv.Command(t, exe, "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
    cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
    cmd = testenv.CleanCmdEnv(cmd)
    cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
    out, err := cmd.CombinedOutput()
@@ -558,14 +669,7 @@ func TestRaceBeforeParallel(t *testing.T) {
}

func TestRaceBeforeTests(t *testing.T) {
    testenv.MustHaveExec(t)

    exe, err := os.Executable()
    if err != nil {
        t.Skipf("can't find test executable: %v", err)
    }

    cmd := testenv.Command(t, exe, "-test.run=^$")
    cmd := testenv.Command(t, testenv.Executable(t), "-test.run=^$")
    cmd = testenv.CleanCmdEnv(cmd)
    cmd.Env = append(cmd.Env, "GO_WANT_RACE_BEFORE_TESTS=1")
    out, _ := cmd.CombinedOutput()
@@ -596,6 +700,20 @@ func TestBenchmarkRace(t *testing.T) {
    }
}

func TestBenchmarkRaceBLoop(t *testing.T) {
    out := runTest(t, "BenchmarkBLoopRacy")
    c := bytes.Count(out, []byte("race detected during execution of test"))

    want := 0
    // We should see one race detector report.
    if race.Enabled {
        want = 1
    }
    if c != want {
        t.Errorf("got %d race reports; want %d", c, want)
    }
}

func BenchmarkRacy(b *testing.B) {
    if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
        b.Skipf("skipping intentionally-racy benchmark")
@@ -605,15 +723,25 @@ func BenchmarkRacy(b *testing.B) {
    }
}

func BenchmarkBLoopRacy(b *testing.B) {
    if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
        b.Skipf("skipping intentionally-racy benchmark")
    }
    for b.Loop() {
        doRace()
    }
}

func TestBenchmarkSubRace(t *testing.T) {
    out := runTest(t, "BenchmarkSubRacy")
    c := bytes.Count(out, []byte("race detected during execution of test"))

    want := 0
    // We should see two race detector reports:
    // one in the sub-benchmark, and one in the parent afterward.
    // We should see 3 race detector reports:
    // one in the sub-benchmark, one in the parent afterward,
    // and one in b.Loop.
    if race.Enabled {
        want = 2
        want = 3
    }
    if c != want {
        t.Errorf("got %d race reports; want %d", c, want)
@@ -639,6 +767,12 @@ func BenchmarkSubRacy(b *testing.B) {
        }
    })

    b.Run("racy-bLoop", func(b *testing.B) {
        for b.Loop() {
            doRace()
        }
    })

    doRace() // should be reported separately
}

@@ -813,3 +947,85 @@ func TestParentRun(t1 *testing.T) {
        })
    })
}

func TestContext(t *testing.T) {
    ctx := t.Context()
    if err := ctx.Err(); err != nil {
        t.Fatalf("expected non-canceled context, got %v", err)
    }

    var innerCtx context.Context
    t.Run("inner", func(t *testing.T) {
        innerCtx = t.Context()
        if err := innerCtx.Err(); err != nil {
            t.Fatalf("expected inner test to not inherit canceled context, got %v", err)
        }
    })
    t.Run("inner2", func(t *testing.T) {
        if !errors.Is(innerCtx.Err(), context.Canceled) {
            t.Fatal("expected context of sibling test to be canceled after its test function finished")
        }
    })

    t.Cleanup(func() {
        if !errors.Is(ctx.Err(), context.Canceled) {
            t.Fatal("expected context canceled before cleanup")
        }
    })
}

func TestBenchmarkBLoopIterationCorrect(t *testing.T) {
    out := runTest(t, "BenchmarkBLoopPrint")
    c := bytes.Count(out, []byte("Printing from BenchmarkBLoopPrint"))

    want := 2
    if c != want {
        t.Errorf("got %d loop iterations; want %d", c, want)
    }

    // b.Loop() will only ramp up once.
    c = bytes.Count(out, []byte("Ramping up from BenchmarkBLoopPrint"))
    want = 1
    if c != want {
        t.Errorf("got %d loop rampup; want %d", c, want)
    }

    re := regexp.MustCompile(`BenchmarkBLoopPrint(-[0-9]+)?\s+2\s+[0-9]+\s+ns/op`)
    if !re.Match(out) {
        t.Error("missing benchmark output")
    }
}

func TestBenchmarkBNIterationCorrect(t *testing.T) {
    out := runTest(t, "BenchmarkBNPrint")
    c := bytes.Count(out, []byte("Printing from BenchmarkBNPrint"))

    // runTest sets -test.benchtime=2x; with the semantics specified in #32051,
    // the benchmark body should run 3 times (one iteration during run1, then
    // two during the measured launch run).
    want := 3
    if c != want {
        t.Errorf("got %d loop iterations; want %d", c, want)
    }

    // A b.N-style fixed-iteration loop will ramp up twice:
    // once in run1(), and again in launch.
    c = bytes.Count(out, []byte("Ramping up from BenchmarkBNPrint"))
    want = 2
    if c != want {
        t.Errorf("got %d loop rampup; want %d", c, want)
    }
}

func BenchmarkBLoopPrint(b *testing.B) {
    b.Logf("Ramping up from BenchmarkBLoopPrint")
    for b.Loop() {
        b.Logf("Printing from BenchmarkBLoopPrint")
    }
}

func BenchmarkBNPrint(b *testing.B) {
    b.Logf("Ramping up from BenchmarkBNPrint")
    for i := 0; i < b.N; i++ {
        b.Logf("Printing from BenchmarkBNPrint")
    }
}