Initial commit: Go 1.23 release state

commit 17cd57a668
Author: Vorapol Rinsatitnon
Date: 2024-09-21 23:49:08 +10:00
13231 changed files with 3114330 additions and 0 deletions

src/testing/allocs.go (new file, 45 lines)

@@ -0,0 +1,45 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"runtime"
)
// AllocsPerRun returns the average number of allocations during calls to f.
// Although the return value has type float64, it will always be an integral value.
//
// To compute the number of allocations, the function will first be run once as
// a warm-up. The average number of allocations over the specified number of
// runs will then be measured and returned.
//
// AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore
// it before returning.
func AllocsPerRun(runs int, f func()) (avg float64) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
// Warm up the function
f()
// Measure the starting statistics
var memstats runtime.MemStats
runtime.ReadMemStats(&memstats)
mallocs := 0 - memstats.Mallocs
// Run the function the specified number of times
for i := 0; i < runs; i++ {
f()
}
// Read the final statistics
runtime.ReadMemStats(&memstats)
mallocs += memstats.Mallocs
// Average the mallocs over the runs (not counting the warm-up).
// We are forced to return a float64 because the API is silly, but do
// the division as integers so we can ask if AllocsPerRun()==1
// instead of AllocsPerRun()<2.
return float64(mallocs / uint64(runs))
}
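
Editorial aside, not part of the commit: a minimal sketch of how AllocsPerRun is typically used from a test. The package name and the allocation threshold are illustrative assumptions; the exact count depends on the function under measurement.

package mypkg_test // illustrative name

import (
	"fmt"
	"testing"
)

func TestSprintfAllocs(t *testing.T) {
	// AllocsPerRun returns an integral float64, so exact comparisons are safe.
	allocs := testing.AllocsPerRun(1000, func() {
		_ = fmt.Sprintf("user-%d", 42)
	})
	if allocs > 2 { // illustrative threshold
		t.Errorf("Sprintf allocated %v objects per call, want at most 2", allocs)
	}
}
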


@@ -0,0 +1,29 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import "testing"
var global any
var allocsPerRunTests = []struct {
name string
fn func()
allocs float64
}{
{"alloc *byte", func() { global = new(*byte) }, 1},
{"alloc complex128", func() { global = new(complex128) }, 1},
{"alloc float64", func() { global = new(float64) }, 1},
{"alloc int32", func() { global = new(int32) }, 1},
{"alloc byte", func() { global = new(byte) }, 1},
}
func TestAllocsPerRun(t *testing.T) {
for _, tt := range allocsPerRunTests {
if allocs := testing.AllocsPerRun(100, tt.fn); allocs != tt.allocs {
t.Errorf("AllocsPerRun(100, %s) = %v, want %v", tt.name, allocs, tt.allocs)
}
}
}

src/testing/benchmark.go (new file, 840 lines)

@@ -0,0 +1,840 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"flag"
"fmt"
"internal/sysinfo"
"io"
"math"
"os"
"runtime"
"slices"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
)
func initBenchmarkFlags() {
matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d` or N times if `d` is of the form Nx")
}
var (
matchBenchmarks *string
benchmarkMemory *bool
benchTime = durationOrCountFlag{d: 1 * time.Second} // changed during test of testing package
)
type durationOrCountFlag struct {
d time.Duration
n int
allowZero bool
}
func (f *durationOrCountFlag) String() string {
if f.n > 0 {
return fmt.Sprintf("%dx", f.n)
}
return f.d.String()
}
func (f *durationOrCountFlag) Set(s string) error {
if strings.HasSuffix(s, "x") {
n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
if err != nil || n < 0 || (!f.allowZero && n == 0) {
return fmt.Errorf("invalid count")
}
*f = durationOrCountFlag{n: int(n)}
return nil
}
d, err := time.ParseDuration(s)
if err != nil || d < 0 || (!f.allowZero && d == 0) {
return fmt.Errorf("invalid duration")
}
*f = durationOrCountFlag{d: d}
return nil
}
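
Editorial aside, not part of the commit: -test.benchtime therefore accepts either a duration (go test -bench=. -benchtime=2s) or an iteration count (-benchtime=100x). The standalone helper below mirrors that parse decision for illustration only; it is not the package's own type.

package main // illustrative

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseBenchTime mirrors the Nx-or-duration rule used by durationOrCountFlag.Set.
func parseBenchTime(s string) (count int, d time.Duration, err error) {
	if strings.HasSuffix(s, "x") {
		n, err := strconv.ParseInt(strings.TrimSuffix(s, "x"), 10, 0)
		if err != nil || n <= 0 {
			return 0, 0, fmt.Errorf("invalid count %q", s)
		}
		return int(n), 0, nil
	}
	d, err = time.ParseDuration(s)
	if err != nil || d <= 0 {
		return 0, 0, fmt.Errorf("invalid duration %q", s)
	}
	return 0, d, nil
}

func main() {
	fmt.Println(parseBenchTime("100x")) // 100 0s <nil>
	fmt.Println(parseBenchTime("2s"))   // 0 2s <nil>
}
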
// Global lock to ensure only one benchmark runs at a time.
var benchmarkLock sync.Mutex
// Used for every benchmark for measuring memory.
var memStats runtime.MemStats
// InternalBenchmark is an internal type but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
type InternalBenchmark struct {
Name string
F func(b *B)
}
// B is a type passed to [Benchmark] functions to manage benchmark
// timing and to specify the number of iterations to run.
//
// A benchmark ends when its Benchmark function returns or calls any of the methods
// FailNow, Fatal, Fatalf, SkipNow, Skip, or Skipf. Those methods must be called
// only from the goroutine running the Benchmark function.
// The other reporting methods, such as the variations of Log and Error,
// may be called simultaneously from multiple goroutines.
//
// Like in tests, benchmark logs are accumulated during execution
// and dumped to standard output when done. Unlike in tests, benchmark logs
// are always printed, so as not to hide output whose existence may be
// affecting benchmark results.
type B struct {
common
importPath string // import path of the package containing the benchmark
context *benchContext
N int
previousN int // number of iterations in the previous run
previousDuration time.Duration // total duration of the previous run
benchFunc func(b *B)
benchTime durationOrCountFlag
bytes int64
missingBytes bool // one of the subbenchmarks does not have bytes set.
timerOn bool
showAllocResult bool
result BenchmarkResult
parallelism int // RunParallel creates parallelism*GOMAXPROCS goroutines
// The initial states of memStats.Mallocs and memStats.TotalAlloc.
startAllocs uint64
startBytes uint64
// The net total of this test after being run.
netAllocs uint64
netBytes uint64
// Extra metrics collected by ReportMetric.
extra map[string]float64
}
// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also be used to resume timing after
// a call to [B.StopTimer].
func (b *B) StartTimer() {
if !b.timerOn {
runtime.ReadMemStats(&memStats)
b.startAllocs = memStats.Mallocs
b.startBytes = memStats.TotalAlloc
b.start = highPrecisionTimeNow()
b.timerOn = true
}
}
// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (b *B) StopTimer() {
if b.timerOn {
b.duration += highPrecisionTimeSince(b.start)
runtime.ReadMemStats(&memStats)
b.netAllocs += memStats.Mallocs - b.startAllocs
b.netBytes += memStats.TotalAlloc - b.startBytes
b.timerOn = false
}
}
// ResetTimer zeroes the elapsed benchmark time and memory allocation counters
// and deletes user-reported metrics.
// It does not affect whether the timer is running.
func (b *B) ResetTimer() {
if b.extra == nil {
// Allocate the extra map before reading memory stats.
// Pre-size it to make more allocation unlikely.
b.extra = make(map[string]float64, 16)
} else {
clear(b.extra)
}
if b.timerOn {
runtime.ReadMemStats(&memStats)
b.startAllocs = memStats.Mallocs
b.startBytes = memStats.TotalAlloc
b.start = highPrecisionTimeNow()
}
b.duration = 0
b.netAllocs = 0
b.netBytes = 0
}
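
Editorial aside, not part of the commit: a minimal sketch of keeping setup cost out of the measurement with ResetTimer (StopTimer and StartTimer work the same way around mid-benchmark pauses). The workload is illustrative.

package mypkg_test // illustrative name

import (
	"sort"
	"testing"
)

func BenchmarkSortedSearch(b *testing.B) {
	data := make([]int, 1<<20) // expensive setup that should not be timed
	for i := range data {
		data[i] = i
	}
	b.ResetTimer() // discard the time and allocations spent on setup
	for i := 0; i < b.N; i++ {
		_ = sort.SearchInts(data, i&(1<<20-1))
	}
}
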
// SetBytes records the number of bytes processed in a single operation.
// If this is called, the benchmark will report ns/op and MB/s.
func (b *B) SetBytes(n int64) { b.bytes = n }
// ReportAllocs enables malloc statistics for this benchmark.
// It is equivalent to setting -test.benchmem, but it only affects the
// benchmark function that calls ReportAllocs.
func (b *B) ReportAllocs() {
b.showAllocResult = true
}
// runN runs a single benchmark for the specified number of iterations.
func (b *B) runN(n int) {
benchmarkLock.Lock()
defer benchmarkLock.Unlock()
defer func() {
b.runCleanup(normalPanic)
b.checkRaces()
}()
// Try to get a comparable environment for each run
// by clearing garbage from previous runs.
runtime.GC()
b.resetRaces()
b.N = n
b.parallelism = 1
b.ResetTimer()
b.StartTimer()
b.benchFunc(b)
b.StopTimer()
b.previousN = n
b.previousDuration = b.duration
}
// run1 runs the first iteration of benchFunc. It reports whether more
// iterations of this benchmark should be run.
func (b *B) run1() bool {
if ctx := b.context; ctx != nil {
// Extend maxLen, if needed.
if n := len(b.name) + ctx.extLen + 1; n > ctx.maxLen {
ctx.maxLen = n + 8 // Add additional slack to avoid too many jumps in size.
}
}
go func() {
// Signal that we're done whether we return normally
// or by FailNow's runtime.Goexit.
defer func() {
b.signal <- true
}()
b.runN(1)
}()
<-b.signal
if b.failed {
fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), b.name, b.output)
return false
}
// Only print the output if we know we are not going to proceed.
// Otherwise it is printed in processBench.
b.mu.RLock()
finished := b.finished
b.mu.RUnlock()
if b.hasSub.Load() || finished {
tag := "BENCH"
if b.skipped {
tag = "SKIP"
}
if b.chatty != nil && (len(b.output) > 0 || finished) {
b.trimOutput()
fmt.Fprintf(b.w, "%s--- %s: %s\n%s", b.chatty.prefix(), tag, b.name, b.output)
}
return false
}
return true
}
var labelsOnce sync.Once
// run executes the benchmark in a separate goroutine, including all of its
// subbenchmarks. b must not have subbenchmarks.
func (b *B) run() {
labelsOnce.Do(func() {
fmt.Fprintf(b.w, "goos: %s\n", runtime.GOOS)
fmt.Fprintf(b.w, "goarch: %s\n", runtime.GOARCH)
if b.importPath != "" {
fmt.Fprintf(b.w, "pkg: %s\n", b.importPath)
}
if cpu := sysinfo.CPUName(); cpu != "" {
fmt.Fprintf(b.w, "cpu: %s\n", cpu)
}
})
if b.context != nil {
// Running go test --test.bench
b.context.processBench(b) // Must call doBench.
} else {
// Running func Benchmark.
b.doBench()
}
}
func (b *B) doBench() BenchmarkResult {
go b.launch()
<-b.signal
return b.result
}
// launch launches the benchmark function. It gradually increases the number
// of benchmark iterations until the benchmark runs for the requested benchtime.
// launch is run by the doBench function as a separate goroutine.
// run1 must have been called on b.
func (b *B) launch() {
// Signal that we're done whether we return normally
// or by FailNow's runtime.Goexit.
defer func() {
b.signal <- true
}()
// Run the benchmark for at least the specified amount of time.
if b.benchTime.n > 0 {
// We already ran a single iteration in run1.
// If -benchtime=1x was requested, use that result.
// See https://golang.org/issue/32051.
if b.benchTime.n > 1 {
b.runN(b.benchTime.n)
}
} else {
d := b.benchTime.d
for n := int64(1); !b.failed && b.duration < d && n < 1e9; {
last := n
// Predict required iterations.
goalns := d.Nanoseconds()
prevIters := int64(b.N)
prevns := b.duration.Nanoseconds()
if prevns <= 0 {
// Round up, to avoid div by zero.
prevns = 1
}
// Order of operations matters.
// For very fast benchmarks, prevIters ~= prevns.
// If you divide first, you get 0 or 1,
// which can hide an order of magnitude in execution time.
// So multiply first, then divide.
n = goalns * prevIters / prevns
// Run more iterations than we think we'll need (1.2x).
n += n / 5
// Don't grow too fast in case we had timing errors previously.
n = min(n, 100*last)
// Be sure to run at least one more than last time.
n = max(n, last+1)
// Don't run more than 1e9 times. (This also keeps n in int range on 32 bit platforms.)
n = min(n, 1e9)
b.runN(int(n))
}
}
b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes, b.extra}
}
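
Editorial aside, not part of the commit: a worked pass through the growth rule above, with illustrative numbers. With -benchtime=1s (goalns = 1e9), suppose the previous run did prevIters = 100 iterations in prevns = 2e6 ns (so last = 100). Then n = 1e9*100/2e6 = 50000; the 1.2x safety margin raises it to 60000; the 100*last cap pulls it back to 10000; it already exceeds last+1 and the 1e9 ceiling does not bind, so the next runN call uses n = 10000. The cap is what keeps a single noisy measurement from triggering a huge jump in iteration count.
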
// Elapsed returns the measured elapsed time of the benchmark.
// The duration reported by Elapsed matches the one measured by
// [B.StartTimer], [B.StopTimer], and [B.ResetTimer].
func (b *B) Elapsed() time.Duration {
d := b.duration
if b.timerOn {
d += highPrecisionTimeSince(b.start)
}
return d
}
// ReportMetric adds "n unit" to the reported benchmark results.
// If the metric is per-iteration, the caller should divide by b.N,
// and by convention units should end in "/op".
// ReportMetric overrides any previously reported value for the same unit.
// ReportMetric panics if unit is the empty string or if unit contains
// any whitespace.
// If unit is a unit normally reported by the benchmark framework itself
// (such as "allocs/op"), ReportMetric will override that metric.
// Setting "ns/op" to 0 will suppress that built-in metric.
func (b *B) ReportMetric(n float64, unit string) {
if unit == "" {
panic("metric unit must not be empty")
}
if strings.IndexFunc(unit, unicode.IsSpace) >= 0 {
panic("metric unit must not contain whitespace")
}
b.extra[unit] = n
}
// BenchmarkResult contains the results of a benchmark run.
type BenchmarkResult struct {
N int // The number of iterations.
T time.Duration // The total time taken.
Bytes int64 // Bytes processed in one iteration.
MemAllocs uint64 // The total number of memory allocations.
MemBytes uint64 // The total number of bytes allocated.
// Extra records additional metrics reported by ReportMetric.
Extra map[string]float64
}
// NsPerOp returns the "ns/op" metric.
func (r BenchmarkResult) NsPerOp() int64 {
if v, ok := r.Extra["ns/op"]; ok {
return int64(v)
}
if r.N <= 0 {
return 0
}
return r.T.Nanoseconds() / int64(r.N)
}
// mbPerSec returns the "MB/s" metric.
func (r BenchmarkResult) mbPerSec() float64 {
if v, ok := r.Extra["MB/s"]; ok {
return v
}
if r.Bytes <= 0 || r.T <= 0 || r.N <= 0 {
return 0
}
return (float64(r.Bytes) * float64(r.N) / 1e6) / r.T.Seconds()
}
// AllocsPerOp returns the "allocs/op" metric,
// which is calculated as r.MemAllocs / r.N.
func (r BenchmarkResult) AllocsPerOp() int64 {
if v, ok := r.Extra["allocs/op"]; ok {
return int64(v)
}
if r.N <= 0 {
return 0
}
return int64(r.MemAllocs) / int64(r.N)
}
// AllocedBytesPerOp returns the "B/op" metric,
// which is calculated as r.MemBytes / r.N.
func (r BenchmarkResult) AllocedBytesPerOp() int64 {
if v, ok := r.Extra["B/op"]; ok {
return int64(v)
}
if r.N <= 0 {
return 0
}
return int64(r.MemBytes) / int64(r.N)
}
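
Editorial aside, not part of the commit: illustrative arithmetic for these derived metrics. A result with N = 1000, T = 2ms, Bytes = 4096, MemAllocs = 3000, and MemBytes = 64000 reports 2e6/1000 = 2000 ns/op, (4096*1000/1e6)/0.002 = 2048 MB/s, 3000/1000 = 3 allocs/op, and 64000/1000 = 64 B/op.
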
// String returns a summary of the benchmark results.
// It follows the benchmark result line format from
// https://golang.org/design/14313-benchmark-format, not including the
// benchmark name.
// Extra metrics override built-in metrics of the same name.
// String does not include allocs/op or B/op, since those are reported
// by [BenchmarkResult.MemString].
func (r BenchmarkResult) String() string {
buf := new(strings.Builder)
fmt.Fprintf(buf, "%8d", r.N)
// Get ns/op as a float.
ns, ok := r.Extra["ns/op"]
if !ok {
ns = float64(r.T.Nanoseconds()) / float64(r.N)
}
if ns != 0 {
buf.WriteByte('\t')
prettyPrint(buf, ns, "ns/op")
}
if mbs := r.mbPerSec(); mbs != 0 {
fmt.Fprintf(buf, "\t%7.2f MB/s", mbs)
}
// Print extra metrics that aren't represented in the standard
// metrics.
var extraKeys []string
for k := range r.Extra {
switch k {
case "ns/op", "MB/s", "B/op", "allocs/op":
// Built-in metrics reported elsewhere.
continue
}
extraKeys = append(extraKeys, k)
}
slices.Sort(extraKeys)
for _, k := range extraKeys {
buf.WriteByte('\t')
prettyPrint(buf, r.Extra[k], k)
}
return buf.String()
}
func prettyPrint(w io.Writer, x float64, unit string) {
// Print all numbers with 10 places before the decimal point
// and small numbers with four sig figs. Field widths are
// chosen to fit the whole part in 10 places while aligning
// the decimal point of all fractional formats.
var format string
switch y := math.Abs(x); {
case y == 0 || y >= 999.95:
format = "%10.0f %s"
case y >= 99.995:
format = "%12.1f %s"
case y >= 9.9995:
format = "%13.2f %s"
case y >= 0.99995:
format = "%14.3f %s"
case y >= 0.099995:
format = "%15.4f %s"
case y >= 0.0099995:
format = "%16.5f %s"
case y >= 0.00099995:
format = "%17.6f %s"
default:
format = "%18.7f %s"
}
fmt.Fprintf(w, format, x, unit)
}
// MemString returns r.AllocedBytesPerOp and r.AllocsPerOp in the same format as 'go test'.
func (r BenchmarkResult) MemString() string {
return fmt.Sprintf("%8d B/op\t%8d allocs/op",
r.AllocedBytesPerOp(), r.AllocsPerOp())
}
// benchmarkName returns full name of benchmark including procs suffix.
func benchmarkName(name string, n int) string {
if n != 1 {
return fmt.Sprintf("%s-%d", name, n)
}
return name
}
type benchContext struct {
match *matcher
maxLen int // The largest recorded benchmark name.
extLen int // Maximum extension length.
}
// RunBenchmarks is an internal function but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
func RunBenchmarks(matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) {
runBenchmarks("", matchString, benchmarks)
}
func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
// If no flag was specified, don't run benchmarks.
if len(*matchBenchmarks) == 0 {
return true
}
// Collect matching benchmarks and determine longest name.
maxprocs := 1
for _, procs := range cpuList {
if procs > maxprocs {
maxprocs = procs
}
}
ctx := &benchContext{
match: newMatcher(matchString, *matchBenchmarks, "-test.bench", *skip),
extLen: len(benchmarkName("", maxprocs)),
}
var bs []InternalBenchmark
for _, Benchmark := range benchmarks {
if _, matched, _ := ctx.match.fullName(nil, Benchmark.Name); matched {
bs = append(bs, Benchmark)
benchName := benchmarkName(Benchmark.Name, maxprocs)
if l := len(benchName) + ctx.extLen + 1; l > ctx.maxLen {
ctx.maxLen = l
}
}
}
main := &B{
common: common{
name: "Main",
w: os.Stdout,
bench: true,
},
importPath: importPath,
benchFunc: func(b *B) {
for _, Benchmark := range bs {
b.Run(Benchmark.Name, Benchmark.F)
}
},
benchTime: benchTime,
context: ctx,
}
if Verbose() {
main.chatty = newChattyPrinter(main.w)
}
main.runN(1)
return !main.failed
}
// processBench runs bench b for the configured CPU counts and prints the results.
func (ctx *benchContext) processBench(b *B) {
for i, procs := range cpuList {
for j := uint(0); j < *count; j++ {
runtime.GOMAXPROCS(procs)
benchName := benchmarkName(b.name, procs)
// If it's chatty, we've already printed this information.
if b.chatty == nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
// Recompute the running time for all but the first iteration.
if i > 0 || j > 0 {
b = &B{
common: common{
signal: make(chan bool),
name: b.name,
w: b.w,
chatty: b.chatty,
bench: true,
},
benchFunc: b.benchFunc,
benchTime: b.benchTime,
}
b.run1()
}
r := b.doBench()
if b.failed {
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason
// the benchmark failed.
fmt.Fprintf(b.w, "%s--- FAIL: %s\n%s", b.chatty.prefix(), benchName, b.output)
continue
}
results := r.String()
if b.chatty != nil {
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
}
if *benchmarkMemory || b.showAllocResult {
results += "\t" + r.MemString()
}
fmt.Fprintln(b.w, results)
// Unlike with tests, we ignore the -chatty flag and always print output for
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
b.trimOutput()
fmt.Fprintf(b.w, "%s--- BENCH: %s\n%s", b.chatty.prefix(), benchName, b.output)
}
if p := runtime.GOMAXPROCS(-1); p != procs {
fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
}
if b.chatty != nil && b.chatty.json {
b.chatty.Updatef("", "=== NAME %s\n", "")
}
}
}
}
// If hideStdoutForTesting is true, Run does not print the benchName.
// This avoids a spurious print during 'go test' on package testing itself,
// which invokes b.Run in its own tests (see sub_test.go).
var hideStdoutForTesting = false
// Run benchmarks f as a subbenchmark with the given name. It reports
// whether there were any failures.
//
// A subbenchmark is like any other benchmark. A benchmark that calls Run at
// least once will not be measured itself and will be called once with N=1.
func (b *B) Run(name string, f func(b *B)) bool {
// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
// Release the lock and acquire it on exit to ensure locks stay paired.
b.hasSub.Store(true)
benchmarkLock.Unlock()
defer benchmarkLock.Lock()
benchName, ok, partial := b.name, true, false
if b.context != nil {
benchName, ok, partial = b.context.match.fullName(&b.common, name)
}
if !ok {
return true
}
var pc [maxStackLen]uintptr
n := runtime.Callers(2, pc[:])
sub := &B{
common: common{
signal: make(chan bool),
name: benchName,
parent: &b.common,
level: b.level + 1,
creator: pc[:n],
w: b.w,
chatty: b.chatty,
bench: true,
},
importPath: b.importPath,
benchFunc: f,
benchTime: b.benchTime,
context: b.context,
}
if partial {
// Partial name match, like -bench=X/Y matching BenchmarkX.
// Only process sub-benchmarks, if any.
sub.hasSub.Store(true)
}
if b.chatty != nil {
labelsOnce.Do(func() {
fmt.Printf("goos: %s\n", runtime.GOOS)
fmt.Printf("goarch: %s\n", runtime.GOARCH)
if b.importPath != "" {
fmt.Printf("pkg: %s\n", b.importPath)
}
if cpu := sysinfo.CPUName(); cpu != "" {
fmt.Printf("cpu: %s\n", cpu)
}
})
if !hideStdoutForTesting {
if b.chatty.json {
b.chatty.Updatef(benchName, "=== RUN %s\n", benchName)
}
fmt.Println(benchName)
}
}
if sub.run1() {
sub.run()
}
b.add(sub.result)
return !sub.failed
}
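
Editorial aside, not part of the commit: a minimal sketch of table-driven sub-benchmarks with b.Run; the sizes and the copy workload are illustrative.

package mypkg_test // illustrative name

import (
	"fmt"
	"testing"
)

func BenchmarkCopy(b *testing.B) {
	for _, size := range []int{16, 1 << 10, 1 << 16} {
		b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
			src := make([]byte, size)
			dst := make([]byte, size)
			b.SetBytes(int64(size)) // each sub-benchmark also reports MB/s
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				copy(dst, src)
			}
		})
	}
}
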
// add simulates running benchmarks in sequence in a single iteration. It is
// used to give some meaningful results in case func Benchmark is used in
// combination with Run.
func (b *B) add(other BenchmarkResult) {
r := &b.result
// The aggregated BenchmarkResults resemble running all subbenchmarks as
// in sequence in a single benchmark.
r.N = 1
r.T += time.Duration(other.NsPerOp())
if other.Bytes == 0 {
// Summing Bytes is meaningless in aggregate if not all subbenchmarks
// set it.
b.missingBytes = true
r.Bytes = 0
}
if !b.missingBytes {
r.Bytes += other.Bytes
}
r.MemAllocs += uint64(other.AllocsPerOp())
r.MemBytes += uint64(other.AllocedBytesPerOp())
}
// trimOutput shortens the output from a benchmark, which can be very long.
func (b *B) trimOutput() {
// The output is likely to appear multiple times because the benchmark
// is run multiple times, but at least it will be seen. This is not a big deal
// because benchmarks rarely print, but just in case, we trim it if it's too long.
const maxNewlines = 10
for nlCount, j := 0, 0; j < len(b.output); j++ {
if b.output[j] == '\n' {
nlCount++
if nlCount >= maxNewlines {
b.output = append(b.output[:j], "\n\t... [output truncated]\n"...)
break
}
}
}
}
// A PB is used by RunParallel for running parallel benchmarks.
type PB struct {
globalN *atomic.Uint64 // iteration counter shared by all worker goroutines
grain uint64 // acquire that many iterations from globalN at once
cache uint64 // local cache of acquired iterations
bN uint64 // total number of iterations to execute (b.N)
}
// Next reports whether there are more iterations to execute.
func (pb *PB) Next() bool {
if pb.cache == 0 {
n := pb.globalN.Add(pb.grain)
if n <= pb.bN {
pb.cache = pb.grain
} else if n < pb.bN+pb.grain {
pb.cache = pb.bN + pb.grain - n
} else {
return false
}
}
pb.cache--
return true
}
// RunParallel runs a benchmark in parallel.
// It creates multiple goroutines and distributes b.N iterations among them.
// The number of goroutines defaults to GOMAXPROCS. To increase parallelism for
// non-CPU-bound benchmarks, call [B.SetParallelism] before RunParallel.
// RunParallel is usually used with the go test -cpu flag.
//
// The body function will be run in each goroutine. It should set up any
// goroutine-local state and then iterate until pb.Next returns false.
// It should not use the [B.StartTimer], [B.StopTimer], or [B.ResetTimer] functions,
// because they have global effect. It should also not call [B.Run].
//
// RunParallel reports ns/op values as wall time for the benchmark as a whole,
// not the sum of wall time or CPU time over each parallel goroutine.
func (b *B) RunParallel(body func(*PB)) {
if b.N == 0 {
return // Nothing to do when probing.
}
// Calculate grain size as number of iterations that take ~100µs.
// 100µs is enough to amortize the overhead and provide sufficient
// dynamic load balancing.
grain := uint64(0)
if b.previousN > 0 && b.previousDuration > 0 {
grain = 1e5 * uint64(b.previousN) / uint64(b.previousDuration)
}
if grain < 1 {
grain = 1
}
// We expect the inner loop and function call to take at least 10ns,
// so do not do more than 100µs/10ns=1e4 iterations.
if grain > 1e4 {
grain = 1e4
}
var n atomic.Uint64
numProcs := b.parallelism * runtime.GOMAXPROCS(0)
var wg sync.WaitGroup
wg.Add(numProcs)
for p := 0; p < numProcs; p++ {
go func() {
defer wg.Done()
pb := &PB{
globalN: &n,
grain: grain,
bN: uint64(b.N),
}
body(pb)
}()
}
wg.Wait()
if n.Load() <= uint64(b.N) && !b.Failed() {
b.Fatal("RunParallel: body exited without pb.Next() == false")
}
}
// SetParallelism sets the number of goroutines used by [B.RunParallel] to p*GOMAXPROCS.
// There is usually no need to call SetParallelism for CPU-bound benchmarks.
// If p is less than 1, this call will have no effect.
func (b *B) SetParallelism(p int) {
if p >= 1 {
b.parallelism = p
}
}
// Benchmark benchmarks a single function. It is useful for creating
// custom benchmarks that do not use the "go test" command.
//
// If f depends on testing flags, then [Init] must be used to register
// those flags before calling Benchmark and before calling [flag.Parse].
//
// If f calls Run, the result will be an estimate of running all its
// subbenchmarks that don't call Run in sequence in a single benchmark.
func Benchmark(f func(b *B)) BenchmarkResult {
b := &B{
common: common{
signal: make(chan bool),
w: discard{},
},
benchFunc: f,
benchTime: benchTime,
}
if b.run1() {
b.run()
}
return b.result
}
type discard struct{}
func (discard) Write(b []byte) (n int, err error) { return len(b), nil }
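
Editorial aside, not part of the commit: a minimal sketch of driving a benchmark outside "go test" with the exported Benchmark function; the workload is illustrative and the printed numbers depend on the machine.

package main

import (
	"fmt"
	"strings"
	"testing"
)

func main() {
	res := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = strings.Repeat("x", 64)
		}
	})
	// String gives the "N   ns/op" summary; MemString adds the B/op and allocs/op columns.
	fmt.Println(res.String(), res.MemString())
}
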


@@ -0,0 +1,214 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"bytes"
"cmp"
"runtime"
"slices"
"strings"
"sync/atomic"
"testing"
"text/template"
"time"
)
var prettyPrintTests = []struct {
v float64
expected string
}{
{0, " 0 x"},
{1234.1, " 1234 x"},
{-1234.1, " -1234 x"},
{999.950001, " 1000 x"},
{999.949999, " 999.9 x"},
{99.9950001, " 100.0 x"},
{99.9949999, " 99.99 x"},
{-99.9949999, " -99.99 x"},
{0.000999950001, " 0.001000 x"},
{0.000999949999, " 0.0009999 x"}, // smallest case
{0.0000999949999, " 0.0001000 x"},
}
func TestPrettyPrint(t *testing.T) {
for _, tt := range prettyPrintTests {
buf := new(strings.Builder)
testing.PrettyPrint(buf, tt.v, "x")
if tt.expected != buf.String() {
t.Errorf("prettyPrint(%v): expected %q, actual %q", tt.v, tt.expected, buf.String())
}
}
}
func TestResultString(t *testing.T) {
// Test fractional ns/op handling
r := testing.BenchmarkResult{
N: 100,
T: 240 * time.Nanosecond,
}
if r.NsPerOp() != 2 {
t.Errorf("NsPerOp: expected 2, actual %v", r.NsPerOp())
}
if want, got := " 100\t 2.400 ns/op", r.String(); want != got {
t.Errorf("String: expected %q, actual %q", want, got)
}
// Test sub-1 ns/op (issue #31005)
r.T = 40 * time.Nanosecond
if want, got := " 100\t 0.4000 ns/op", r.String(); want != got {
t.Errorf("String: expected %q, actual %q", want, got)
}
// Test 0 ns/op
r.T = 0
if want, got := " 100", r.String(); want != got {
t.Errorf("String: expected %q, actual %q", want, got)
}
}
func TestRunParallel(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
}
testing.Benchmark(func(b *testing.B) {
procs := uint32(0)
iters := uint64(0)
b.SetParallelism(3)
b.RunParallel(func(pb *testing.PB) {
atomic.AddUint32(&procs, 1)
for pb.Next() {
atomic.AddUint64(&iters, 1)
}
})
if want := uint32(3 * runtime.GOMAXPROCS(0)); procs != want {
t.Errorf("got %v procs, want %v", procs, want)
}
if iters != uint64(b.N) {
t.Errorf("got %v iters, want %v", iters, b.N)
}
})
}
func TestRunParallelFail(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
// The function must be able to log/abort
// w/o crashing/deadlocking the whole benchmark.
b.Log("log")
b.Error("error")
})
})
}
func TestRunParallelFatal(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if b.N > 1 {
b.Fatal("error")
}
}
})
})
}
func TestRunParallelSkipNow(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if b.N > 1 {
b.SkipNow()
}
}
})
})
}
func ExampleB_RunParallel() {
// Parallel benchmark for text/template.Template.Execute on a single object.
testing.Benchmark(func(b *testing.B) {
templ := template.Must(template.New("test").Parse("Hello, {{.}}!"))
// RunParallel will create GOMAXPROCS goroutines
// and distribute work among them.
b.RunParallel(func(pb *testing.PB) {
// Each goroutine has its own bytes.Buffer.
var buf bytes.Buffer
for pb.Next() {
// The loop body is executed b.N times total across all goroutines.
buf.Reset()
templ.Execute(&buf, "World")
}
})
})
}
func TestReportMetric(t *testing.T) {
res := testing.Benchmark(func(b *testing.B) {
b.ReportMetric(12345, "ns/op")
b.ReportMetric(0.2, "frobs/op")
})
// Test built-in overriding.
if res.NsPerOp() != 12345 {
t.Errorf("NsPerOp: expected %v, actual %v", 12345, res.NsPerOp())
}
// Test stringing.
res.N = 1 // Make the output stable
want := " 1\t 12345 ns/op\t 0.2000 frobs/op"
if want != res.String() {
t.Errorf("expected %q, actual %q", want, res.String())
}
}
func ExampleB_ReportMetric() {
// This reports a custom benchmark metric relevant to a
// specific algorithm (in this case, sorting).
testing.Benchmark(func(b *testing.B) {
var compares int64
for i := 0; i < b.N; i++ {
s := []int{5, 4, 3, 2, 1}
slices.SortFunc(s, func(a, b int) int {
compares++
return cmp.Compare(a, b)
})
}
// This metric is per-operation, so divide by b.N and
// report it as a "/op" unit.
b.ReportMetric(float64(compares)/float64(b.N), "compares/op")
// This metric is per-time, so divide by b.Elapsed and
// report it as a "/ns" unit.
b.ReportMetric(float64(compares)/float64(b.Elapsed().Nanoseconds()), "compares/ns")
})
}
func ExampleB_ReportMetric_parallel() {
// This reports a custom benchmark metric relevant to a
// specific algorithm (in this case, sorting) in parallel.
testing.Benchmark(func(b *testing.B) {
var compares atomic.Int64
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
s := []int{5, 4, 3, 2, 1}
slices.SortFunc(s, func(a, b int) int {
// Because RunParallel runs the function many
// times in parallel, we must increment the
// counter atomically to avoid racing writes.
compares.Add(1)
return cmp.Compare(a, b)
})
}
})
// NOTE: Report each metric once, after all of the parallel
// calls have completed.
// This metric is per-operation, so divide by b.N and
// report it as a "/op" unit.
b.ReportMetric(float64(compares.Load())/float64(b.N), "compares/op")
// This metric is per-time, so divide by b.Elapsed and
// report it as a "/ns" unit.
b.ReportMetric(float64(compares.Load())/float64(b.Elapsed().Nanoseconds()), "compares/ns")
})
}

src/testing/cover.go (new file, 124 lines)

@@ -0,0 +1,124 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Support for test coverage.
package testing
import (
"fmt"
"internal/goexperiment"
"os"
"sync/atomic"
)
// CoverBlock records the coverage data for a single basic block.
// The fields are 1-indexed, as in an editor: The opening line of
// the file is number 1, for example. Columns are measured
// in bytes.
// NOTE: This struct is internal to the testing infrastructure and may change.
// It is not covered (yet) by the Go 1 compatibility guidelines.
type CoverBlock struct {
Line0 uint32 // Line number for block start.
Col0 uint16 // Column number for block start.
Line1 uint32 // Line number for block end.
Col1 uint16 // Column number for block end.
Stmts uint16 // Number of statements included in this block.
}
var cover Cover
// Cover records information about test coverage checking.
// NOTE: This struct is internal to the testing infrastructure and may change.
// It is not covered (yet) by the Go 1 compatibility guidelines.
type Cover struct {
Mode string
Counters map[string][]uint32
Blocks map[string][]CoverBlock
CoveredPackages string
}
// Coverage reports the current code coverage as a fraction in the range [0, 1].
// If coverage is not enabled, Coverage returns 0.
//
// When running a large set of sequential test cases, checking Coverage after each one
// can be useful for identifying which test cases exercise new code paths.
// It is not a replacement for the reports generated by 'go test -cover' and
// 'go tool cover'.
func Coverage() float64 {
if goexperiment.CoverageRedesign {
return coverage2()
}
var n, d int64
for _, counters := range cover.Counters {
for i := range counters {
if atomic.LoadUint32(&counters[i]) > 0 {
n++
}
d++
}
}
if d == 0 {
return 0
}
return float64(n) / float64(d)
}
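
Editorial aside, not part of the commit: a minimal sketch of the sequential-cases pattern described above, run under "go test -cover" inside an ordinary test file. The cases table and the run helper are hypothetical.

func TestCoverageGrowth(t *testing.T) {
	prev := testing.Coverage()
	for _, tc := range cases { // hypothetical table of test cases
		run(t, tc) // hypothetical helper that executes one case
		if c := testing.Coverage(); c > prev {
			t.Logf("%s reached new code paths (%.1f%% -> %.1f%%)", tc.name, 100*prev, 100*c)
			prev = c
		}
	}
}
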
// RegisterCover records the coverage data accumulators for the tests.
// NOTE: This function is internal to the testing infrastructure and may change.
// It is not covered (yet) by the Go 1 compatibility guidelines.
func RegisterCover(c Cover) {
cover = c
}
// mustBeNil checks the error and, if present, reports it and exits.
func mustBeNil(err error) {
if err != nil {
fmt.Fprintf(os.Stderr, "testing: %s\n", err)
os.Exit(2)
}
}
// coverReport reports the coverage percentage and writes a coverage profile if requested.
func coverReport() {
if goexperiment.CoverageRedesign {
coverReport2()
return
}
var f *os.File
var err error
if *coverProfile != "" {
f, err = os.Create(toOutputDir(*coverProfile))
mustBeNil(err)
fmt.Fprintf(f, "mode: %s\n", cover.Mode)
defer func() { mustBeNil(f.Close()) }()
}
var active, total int64
var count uint32
for name, counts := range cover.Counters {
blocks := cover.Blocks[name]
for i := range counts {
stmts := int64(blocks[i].Stmts)
total += stmts
count = atomic.LoadUint32(&counts[i]) // For -mode=atomic.
if count > 0 {
active += stmts
}
if f != nil {
_, err := fmt.Fprintf(f, "%s:%d.%d,%d.%d %d %d\n", name,
blocks[i].Line0, blocks[i].Col0,
blocks[i].Line1, blocks[i].Col1,
stmts,
count)
mustBeNil(err)
}
}
}
if total == 0 {
fmt.Println("coverage: [no statements]")
return
}
fmt.Printf("coverage: %.1f%% of statements%s\n", 100*float64(active)/float64(total), cover.CoveredPackages)
}

src/testing/example.go (new file, 97 lines)

@@ -0,0 +1,97 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"fmt"
"slices"
"strings"
"time"
)
type InternalExample struct {
Name string
F func()
Output string
Unordered bool
}
// RunExamples is an internal function but exported because it is cross-package;
// it is part of the implementation of the "go test" command.
func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) {
_, ok = runExamples(matchString, examples)
return ok
}
func runExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ran, ok bool) {
ok = true
m := newMatcher(matchString, *match, "-test.run", *skip)
var eg InternalExample
for _, eg = range examples {
_, matched, _ := m.fullName(nil, eg.Name)
if !matched {
continue
}
ran = true
if !runExample(eg) {
ok = false
}
}
return ran, ok
}
func sortLines(output string) string {
lines := strings.Split(output, "\n")
slices.Sort(lines)
return strings.Join(lines, "\n")
}
// processRunResult computes a summary and status of the result of running an example test.
// stdout is the captured output from stdout of the test.
// recovered is the result of invoking recover after running the test, in case it panicked.
//
// If stdout doesn't match the expected output or if recovered is non-nil, it'll print the cause of failure to stdout.
// If the test is chatty/verbose, it'll print a success message to stdout.
// If recovered is non-nil, it'll panic with that value.
// If the test panicked with nil, or invoked runtime.Goexit, it'll be
// made to fail and panic with errNilPanicOrGoexit.
func (eg *InternalExample) processRunResult(stdout string, timeSpent time.Duration, finished bool, recovered any) (passed bool) {
passed = true
dstr := fmtDuration(timeSpent)
var fail string
got := strings.TrimSpace(stdout)
want := strings.TrimSpace(eg.Output)
if eg.Unordered {
if sortLines(got) != sortLines(want) && recovered == nil {
fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", stdout, eg.Output)
}
} else {
if got != want && recovered == nil {
fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want)
}
}
if fail != "" || !finished || recovered != nil {
fmt.Printf("%s--- FAIL: %s (%s)\n%s", chatty.prefix(), eg.Name, dstr, fail)
passed = false
} else if chatty.on {
fmt.Printf("%s--- PASS: %s (%s)\n", chatty.prefix(), eg.Name, dstr)
}
if chatty.on && chatty.json {
fmt.Printf("%s=== NAME %s\n", chatty.prefix(), "")
}
if recovered != nil {
// Propagate the previously recovered result, by panicking.
panic(recovered)
} else if !finished {
panic(errNilPanicOrGoexit)
}
return
}
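
Editorial aside, not part of the commit: the Unordered field corresponds to the "Unordered output:" comment form in example functions, for which the comparison above sorts both sides line by line. An illustrative sketch:

func Example_unordered() {
	for k := range map[string]int{"b": 2, "a": 1, "c": 3} {
		fmt.Println(k) // map iteration order is randomized
	}
	// Unordered output:
	// a
	// b
	// c
}
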


@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
var PrettyPrint = prettyPrint
type HighPrecisionTime = highPrecisionTime
var HighPrecisionTimeNow = highPrecisionTimeNow

src/testing/flag_test.go (new file, 89 lines)

@@ -0,0 +1,89 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"flag"
"internal/testenv"
"os"
"os/exec"
"testing"
)
var testFlagArg = flag.String("test_flag_arg", "", "TestFlag: passing -v option")
const flagTestEnv = "GO_WANT_FLAG_HELPER_PROCESS"
func TestFlag(t *testing.T) {
if os.Getenv(flagTestEnv) == "1" {
testFlagHelper(t)
return
}
testenv.MustHaveExec(t)
for _, flag := range []string{"", "-test.v", "-test.v=test2json"} {
flag := flag
t.Run(flag, func(t *testing.T) {
t.Parallel()
exe, err := os.Executable()
if err != nil {
exe = os.Args[0]
}
cmd := exec.Command(exe, "-test.run=^TestFlag$", "-test_flag_arg="+flag)
if flag != "" {
cmd.Args = append(cmd.Args, flag)
}
cmd.Env = append(cmd.Environ(), flagTestEnv+"=1")
b, err := cmd.CombinedOutput()
if len(b) > 0 {
// When we set -test.v=test2json, we need to escape the ^V control
// character used for JSON framing so that the JSON parser doesn't
// misinterpret the subprocess output as output from the parent test.
t.Logf("%q", b)
}
if err != nil {
t.Error(err)
}
})
}
}
// testFlagHelper is called by the TestFlagHelper subprocess.
func testFlagHelper(t *testing.T) {
f := flag.Lookup("test.v")
if f == nil {
t.Fatal(`flag.Lookup("test.v") failed`)
}
bf, ok := f.Value.(interface{ IsBoolFlag() bool })
if !ok {
t.Errorf("test.v flag (type %T) does not have IsBoolFlag method", f)
} else if !bf.IsBoolFlag() {
t.Error("test.v IsBoolFlag() returned false")
}
gf, ok := f.Value.(flag.Getter)
if !ok {
t.Fatalf("test.v flag (type %T) does not have Get method", f)
}
v := gf.Get()
var want any
switch *testFlagArg {
case "":
want = false
case "-test.v":
want = true
case "-test.v=test2json":
want = "test2json"
default:
t.Fatalf("unexpected test_flag_arg %q", *testFlagArg)
}
if v != want {
t.Errorf("test.v is %v want %v", v, want)
}
}

src/testing/fstest/mapfs.go (new file, 244 lines)

@@ -0,0 +1,244 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fstest
import (
"io"
"io/fs"
"path"
"slices"
"strings"
"time"
)
// A MapFS is a simple in-memory file system for use in tests,
// represented as a map from path names (arguments to Open)
// to information about the files or directories they represent.
//
// The map need not include parent directories for files contained
// in the map; those will be synthesized if needed.
// But a directory can still be included by setting the [MapFile.Mode]'s [fs.ModeDir] bit;
// this may be necessary for detailed control over the directory's [fs.FileInfo]
// or to create an empty directory.
//
// File system operations read directly from the map,
// so that the file system can be changed by editing the map as needed.
// An implication is that file system operations must not run concurrently
// with changes to the map, which would be a race.
// Another implication is that opening or reading a directory requires
// iterating over the entire map, so a MapFS should typically be used with not more
// than a few hundred entries or directory reads.
type MapFS map[string]*MapFile
// A MapFile describes a single file in a [MapFS].
type MapFile struct {
Data []byte // file content
Mode fs.FileMode // fs.FileInfo.Mode
ModTime time.Time // fs.FileInfo.ModTime
Sys any // fs.FileInfo.Sys
}
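
Editorial aside, not part of the commit: a minimal sketch of building a MapFS and reading it through the fs helpers; paths and contents are illustrative.

package main

import (
	"fmt"
	"io/fs"
	"log"
	"testing/fstest"
)

func main() {
	fsys := fstest.MapFS{
		"hello.txt":     {Data: []byte("hello, world\n")},
		"sub/notes.txt": {Data: []byte("the parent directory is synthesized\n")},
		"emptydir":      {Mode: fs.ModeDir | 0555}, // explicit empty directory
	}
	data, err := fs.ReadFile(fsys, "sub/notes.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(data))
	entries, _ := fs.ReadDir(fsys, ".")
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir()) // emptydir true, hello.txt false, sub true
	}
}
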
var _ fs.FS = MapFS(nil)
var _ fs.File = (*openMapFile)(nil)
// Open opens the named file.
func (fsys MapFS) Open(name string) (fs.File, error) {
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
file := fsys[name]
if file != nil && file.Mode&fs.ModeDir == 0 {
// Ordinary file
return &openMapFile{name, mapFileInfo{path.Base(name), file}, 0}, nil
}
// Directory, possibly synthesized.
// Note that file can be nil here: the map need not contain explicit parent directories for all its files.
// But file can also be non-nil, in case the user wants to set metadata for the directory explicitly.
// Either way, we need to construct the list of children of this directory.
var list []mapFileInfo
var elem string
var need = make(map[string]bool)
if name == "." {
elem = "."
for fname, f := range fsys {
i := strings.Index(fname, "/")
if i < 0 {
if fname != "." {
list = append(list, mapFileInfo{fname, f})
}
} else {
need[fname[:i]] = true
}
}
} else {
elem = name[strings.LastIndex(name, "/")+1:]
prefix := name + "/"
for fname, f := range fsys {
if strings.HasPrefix(fname, prefix) {
felem := fname[len(prefix):]
i := strings.Index(felem, "/")
if i < 0 {
list = append(list, mapFileInfo{felem, f})
} else {
need[fname[len(prefix):len(prefix)+i]] = true
}
}
}
// If the directory name is not in the map,
// and there are no children of the name in the map,
// then the directory is treated as not existing.
if file == nil && list == nil && len(need) == 0 {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
}
}
for _, fi := range list {
delete(need, fi.name)
}
for name := range need {
list = append(list, mapFileInfo{name, &MapFile{Mode: fs.ModeDir | 0555}})
}
slices.SortFunc(list, func(a, b mapFileInfo) int {
return strings.Compare(a.name, b.name)
})
if file == nil {
file = &MapFile{Mode: fs.ModeDir | 0555}
}
return &mapDir{name, mapFileInfo{elem, file}, list, 0}, nil
}
// fsOnly is a wrapper that hides all but the fs.FS methods,
// to avoid an infinite recursion when implementing special
// methods in terms of helpers that would use them.
// (In general, implementing these methods using the package fs helpers
// is redundant and unnecessary, but having the methods may make
// MapFS exercise more code paths when used in tests.)
type fsOnly struct{ fs.FS }
func (fsys MapFS) ReadFile(name string) ([]byte, error) {
return fs.ReadFile(fsOnly{fsys}, name)
}
func (fsys MapFS) Stat(name string) (fs.FileInfo, error) {
return fs.Stat(fsOnly{fsys}, name)
}
func (fsys MapFS) ReadDir(name string) ([]fs.DirEntry, error) {
return fs.ReadDir(fsOnly{fsys}, name)
}
func (fsys MapFS) Glob(pattern string) ([]string, error) {
return fs.Glob(fsOnly{fsys}, pattern)
}
type noSub struct {
MapFS
}
func (noSub) Sub() {} // not the fs.SubFS signature
func (fsys MapFS) Sub(dir string) (fs.FS, error) {
return fs.Sub(noSub{fsys}, dir)
}
// A mapFileInfo implements fs.FileInfo and fs.DirEntry for a given map file.
type mapFileInfo struct {
name string
f *MapFile
}
func (i *mapFileInfo) Name() string { return path.Base(i.name) }
func (i *mapFileInfo) Size() int64 { return int64(len(i.f.Data)) }
func (i *mapFileInfo) Mode() fs.FileMode { return i.f.Mode }
func (i *mapFileInfo) Type() fs.FileMode { return i.f.Mode.Type() }
func (i *mapFileInfo) ModTime() time.Time { return i.f.ModTime }
func (i *mapFileInfo) IsDir() bool { return i.f.Mode&fs.ModeDir != 0 }
func (i *mapFileInfo) Sys() any { return i.f.Sys }
func (i *mapFileInfo) Info() (fs.FileInfo, error) { return i, nil }
func (i *mapFileInfo) String() string {
return fs.FormatFileInfo(i)
}
// An openMapFile is a regular (non-directory) fs.File open for reading.
type openMapFile struct {
path string
mapFileInfo
offset int64
}
func (f *openMapFile) Stat() (fs.FileInfo, error) { return &f.mapFileInfo, nil }
func (f *openMapFile) Close() error { return nil }
func (f *openMapFile) Read(b []byte) (int, error) {
if f.offset >= int64(len(f.f.Data)) {
return 0, io.EOF
}
if f.offset < 0 {
return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
}
n := copy(b, f.f.Data[f.offset:])
f.offset += int64(n)
return n, nil
}
func (f *openMapFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case 0:
// offset += 0
case 1:
offset += f.offset
case 2:
offset += int64(len(f.f.Data))
}
if offset < 0 || offset > int64(len(f.f.Data)) {
return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid}
}
f.offset = offset
return offset, nil
}
func (f *openMapFile) ReadAt(b []byte, offset int64) (int, error) {
if offset < 0 || offset > int64(len(f.f.Data)) {
return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid}
}
n := copy(b, f.f.Data[offset:])
if n < len(b) {
return n, io.EOF
}
return n, nil
}
// A mapDir is a directory fs.File (so also an fs.ReadDirFile) open for reading.
type mapDir struct {
path string
mapFileInfo
entry []mapFileInfo
offset int
}
func (d *mapDir) Stat() (fs.FileInfo, error) { return &d.mapFileInfo, nil }
func (d *mapDir) Close() error { return nil }
func (d *mapDir) Read(b []byte) (int, error) {
return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid}
}
func (d *mapDir) ReadDir(count int) ([]fs.DirEntry, error) {
n := len(d.entry) - d.offset
if n == 0 && count > 0 {
return nil, io.EOF
}
if count > 0 && n > count {
n = count
}
list := make([]fs.DirEntry, n)
for i := range list {
list[i] = &d.entry[d.offset+i]
}
d.offset += n
return list, nil
}


@@ -0,0 +1,59 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fstest
import (
"fmt"
"io/fs"
"strings"
"testing"
)
func TestMapFS(t *testing.T) {
m := MapFS{
"hello": {Data: []byte("hello, world\n")},
"fortune/k/ken.txt": {Data: []byte("If a program is too slow, it must have a loop.\n")},
}
if err := TestFS(m, "hello", "fortune", "fortune/k", "fortune/k/ken.txt"); err != nil {
t.Fatal(err)
}
}
func TestMapFSChmodDot(t *testing.T) {
m := MapFS{
"a/b.txt": &MapFile{Mode: 0666},
".": &MapFile{Mode: 0777 | fs.ModeDir},
}
buf := new(strings.Builder)
fs.WalkDir(m, ".", func(path string, d fs.DirEntry, err error) error {
fi, err := d.Info()
if err != nil {
return err
}
fmt.Fprintf(buf, "%s: %v\n", path, fi.Mode())
return nil
})
want := `
.: drwxrwxrwx
a: dr-xr-xr-x
a/b.txt: -rw-rw-rw-
`[1:]
got := buf.String()
if want != got {
t.Errorf("MapFS modes want:\n%s\ngot:\n%s\n", want, got)
}
}
func TestMapFSFileInfoName(t *testing.T) {
m := MapFS{
"path/to/b.txt": &MapFile{},
}
info, _ := m.Stat("path/to/b.txt")
want := "b.txt"
got := info.Name()
if want != got {
t.Errorf("MapFS FileInfo.Name want:\n%s\ngot:\n%s\n", want, got)
}
}


@@ -0,0 +1,621 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fstest implements support for testing implementations and users of file systems.
package fstest
import (
"errors"
"fmt"
"io"
"io/fs"
"path"
"reflect"
"slices"
"strings"
"testing/iotest"
)
// TestFS tests a file system implementation.
// It walks the entire tree of files in fsys,
// opening and checking that each file behaves correctly.
// It also checks that the file system contains at least the expected files.
// As a special case, if no expected files are listed, fsys must be empty.
// Otherwise, fsys must contain at least the listed files; it can also contain others.
// The contents of fsys must not change concurrently with TestFS.
//
// If TestFS finds any misbehaviors, it returns either the first error or a
// list of errors. Use [errors.Is] or [errors.As] to inspect.
//
// Typical usage inside a test is:
//
// if err := fstest.TestFS(myFS, "file/that/should/be/present"); err != nil {
// t.Fatal(err)
// }
func TestFS(fsys fs.FS, expected ...string) error {
if err := testFS(fsys, expected...); err != nil {
return err
}
for _, name := range expected {
if i := strings.Index(name, "/"); i >= 0 {
dir, dirSlash := name[:i], name[:i+1]
var subExpected []string
for _, name := range expected {
if strings.HasPrefix(name, dirSlash) {
subExpected = append(subExpected, name[len(dirSlash):])
}
}
sub, err := fs.Sub(fsys, dir)
if err != nil {
return err
}
if err := testFS(sub, subExpected...); err != nil {
return fmt.Errorf("testing fs.Sub(fsys, %s): %w", dir, err)
}
break // one sub-test is enough
}
}
return nil
}
func testFS(fsys fs.FS, expected ...string) error {
t := fsTester{fsys: fsys}
t.checkDir(".")
t.checkOpen(".")
found := make(map[string]bool)
for _, dir := range t.dirs {
found[dir] = true
}
for _, file := range t.files {
found[file] = true
}
delete(found, ".")
if len(expected) == 0 && len(found) > 0 {
var list []string
for k := range found {
if k != "." {
list = append(list, k)
}
}
slices.Sort(list)
if len(list) > 15 {
list = append(list[:10], "...")
}
t.errorf("expected empty file system but found files:\n%s", strings.Join(list, "\n"))
}
for _, name := range expected {
if !found[name] {
t.errorf("expected but not found: %s", name)
}
}
if len(t.errors) == 0 {
return nil
}
return fmt.Errorf("TestFS found errors:\n%w", errors.Join(t.errors...))
}
// An fsTester holds state for running the test.
type fsTester struct {
fsys fs.FS
errors []error
dirs []string
files []string
}
// errorf adds an error to the list of errors.
func (t *fsTester) errorf(format string, args ...any) {
t.errors = append(t.errors, fmt.Errorf(format, args...))
}
func (t *fsTester) openDir(dir string) fs.ReadDirFile {
f, err := t.fsys.Open(dir)
if err != nil {
t.errorf("%s: Open: %w", dir, err)
return nil
}
d, ok := f.(fs.ReadDirFile)
if !ok {
f.Close()
t.errorf("%s: Open returned File type %T, not a fs.ReadDirFile", dir, f)
return nil
}
return d
}
// checkDir checks the directory dir, which is expected to exist
// (it is either the root or was found in a directory listing with IsDir true).
func (t *fsTester) checkDir(dir string) {
// Read entire directory.
t.dirs = append(t.dirs, dir)
d := t.openDir(dir)
if d == nil {
return
}
list, err := d.ReadDir(-1)
if err != nil {
d.Close()
t.errorf("%s: ReadDir(-1): %w", dir, err)
return
}
// Check all children.
var prefix string
if dir == "." {
prefix = ""
} else {
prefix = dir + "/"
}
for _, info := range list {
name := info.Name()
switch {
case name == ".", name == "..", name == "":
t.errorf("%s: ReadDir: child has invalid name: %#q", dir, name)
continue
case strings.Contains(name, "/"):
t.errorf("%s: ReadDir: child name contains slash: %#q", dir, name)
continue
case strings.Contains(name, `\`):
t.errorf("%s: ReadDir: child name contains backslash: %#q", dir, name)
continue
}
path := prefix + name
t.checkStat(path, info)
t.checkOpen(path)
if info.IsDir() {
t.checkDir(path)
} else {
t.checkFile(path)
}
}
// Check ReadDir(-1) at EOF.
list2, err := d.ReadDir(-1)
if len(list2) > 0 || err != nil {
d.Close()
t.errorf("%s: ReadDir(-1) at EOF = %d entries, %w, wanted 0 entries, nil", dir, len(list2), err)
return
}
// Check ReadDir(1) at EOF (different results).
list2, err = d.ReadDir(1)
if len(list2) > 0 || err != io.EOF {
d.Close()
t.errorf("%s: ReadDir(1) at EOF = %d entries, %w, wanted 0 entries, EOF", dir, len(list2), err)
return
}
// Check that close does not report an error.
if err := d.Close(); err != nil {
t.errorf("%s: Close: %w", dir, err)
}
// Check that closing twice doesn't crash.
// The return value doesn't matter.
d.Close()
// Reopen directory, read a second time, make sure contents match.
if d = t.openDir(dir); d == nil {
return
}
defer d.Close()
list2, err = d.ReadDir(-1)
if err != nil {
t.errorf("%s: second Open+ReadDir(-1): %w", dir, err)
return
}
t.checkDirList(dir, "first Open+ReadDir(-1) vs second Open+ReadDir(-1)", list, list2)
// Reopen directory, read a third time in pieces, make sure contents match.
if d = t.openDir(dir); d == nil {
return
}
defer d.Close()
list2 = nil
for {
n := 1
if len(list2) > 0 {
n = 2
}
frag, err := d.ReadDir(n)
if len(frag) > n {
t.errorf("%s: third Open: ReadDir(%d) after %d: %d entries (too many)", dir, n, len(list2), len(frag))
return
}
list2 = append(list2, frag...)
if err == io.EOF {
break
}
if err != nil {
t.errorf("%s: third Open: ReadDir(%d) after %d: %w", dir, n, len(list2), err)
return
}
// A ReadDir returning no entries and no error would otherwise loop forever.
if len(frag) == 0 {
t.errorf("%s: third Open: ReadDir(%d) after %d: 0 entries but nil error", dir, n, len(list2))
return
}
}
t.checkDirList(dir, "first Open+ReadDir(-1) vs third Open+ReadDir(1,2) loop", list, list2)
// If fsys has ReadDir, check that it matches and is sorted.
if fsys, ok := t.fsys.(fs.ReadDirFS); ok {
list2, err := fsys.ReadDir(dir)
if err != nil {
t.errorf("%s: fsys.ReadDir: %w", dir, err)
return
}
t.checkDirList(dir, "first Open+ReadDir(-1) vs fsys.ReadDir", list, list2)
for i := 0; i+1 < len(list2); i++ {
if list2[i].Name() >= list2[i+1].Name() {
t.errorf("%s: fsys.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
}
}
}
// Check fs.ReadDir as well.
list2, err = fs.ReadDir(t.fsys, dir)
if err != nil {
t.errorf("%s: fs.ReadDir: %w", dir, err)
return
}
t.checkDirList(dir, "first Open+ReadDir(-1) vs fs.ReadDir", list, list2)
for i := 0; i+1 < len(list2); i++ {
if list2[i].Name() >= list2[i+1].Name() {
t.errorf("%s: fs.ReadDir: list not sorted: %s before %s", dir, list2[i].Name(), list2[i+1].Name())
}
}
t.checkGlob(dir, list2)
}
// formatEntry formats an fs.DirEntry into a string for error messages and comparison.
func formatEntry(entry fs.DirEntry) string {
return fmt.Sprintf("%s IsDir=%v Type=%v", entry.Name(), entry.IsDir(), entry.Type())
}
// formatInfoEntry formats an fs.FileInfo into a string like the result of formatEntry, for error messages and comparison.
func formatInfoEntry(info fs.FileInfo) string {
return fmt.Sprintf("%s IsDir=%v Type=%v", info.Name(), info.IsDir(), info.Mode().Type())
}
// formatInfo formats an fs.FileInfo into a string for error messages and comparison.
func formatInfo(info fs.FileInfo) string {
return fmt.Sprintf("%s IsDir=%v Mode=%v Size=%d ModTime=%v", info.Name(), info.IsDir(), info.Mode(), info.Size(), info.ModTime())
}
// checkGlob checks that various glob patterns work if the file system implements GlobFS.
func (t *fsTester) checkGlob(dir string, list []fs.DirEntry) {
if _, ok := t.fsys.(fs.GlobFS); !ok {
return
}
// Make a complex glob pattern prefix that only matches dir.
var glob string
if dir != "." {
elem := strings.Split(dir, "/")
for i, e := range elem {
var pattern []rune
for j, r := range e {
if r == '*' || r == '?' || r == '\\' || r == '[' || r == '-' {
pattern = append(pattern, '\\', r)
continue
}
switch (i + j) % 5 {
case 0:
pattern = append(pattern, r)
case 1:
pattern = append(pattern, '[', r, ']')
case 2:
pattern = append(pattern, '[', r, '-', r, ']')
case 3:
pattern = append(pattern, '[', '\\', r, ']')
case 4:
pattern = append(pattern, '[', '\\', r, '-', '\\', r, ']')
}
}
elem[i] = string(pattern)
}
glob = strings.Join(elem, "/") + "/"
}
// Test that malformed patterns are detected.
// The error is likely path.ErrBadPattern but need not be.
if _, err := t.fsys.(fs.GlobFS).Glob(glob + "nonexist/[]"); err == nil {
t.errorf("%s: Glob(%#q): bad pattern not detected", dir, glob+"nonexist/[]")
}
// Try to find a letter that appears in only some of the final names.
c := rune('a')
for ; c <= 'z'; c++ {
have, haveNot := false, false
for _, d := range list {
if strings.ContainsRune(d.Name(), c) {
have = true
} else {
haveNot = true
}
}
if have && haveNot {
break
}
}
if c > 'z' {
c = 'a'
}
glob += "*" + string(c) + "*"
var want []string
for _, d := range list {
if strings.ContainsRune(d.Name(), c) {
want = append(want, path.Join(dir, d.Name()))
}
}
names, err := t.fsys.(fs.GlobFS).Glob(glob)
if err != nil {
t.errorf("%s: Glob(%#q): %w", dir, glob, err)
return
}
if reflect.DeepEqual(want, names) {
return
}
if !slices.IsSorted(names) {
t.errorf("%s: Glob(%#q): unsorted output:\n%s", dir, glob, strings.Join(names, "\n"))
slices.Sort(names)
}
var problems []string
for len(want) > 0 || len(names) > 0 {
switch {
case len(want) > 0 && len(names) > 0 && want[0] == names[0]:
want, names = want[1:], names[1:]
case len(want) > 0 && (len(names) == 0 || want[0] < names[0]):
problems = append(problems, "missing: "+want[0])
want = want[1:]
default:
problems = append(problems, "extra: "+names[0])
names = names[1:]
}
}
t.errorf("%s: Glob(%#q): wrong output:\n%s", dir, glob, strings.Join(problems, "\n"))
}
// checkStat checks that a direct stat of path matches entry,
// which was found in the parent's directory listing.
func (t *fsTester) checkStat(path string, entry fs.DirEntry) {
file, err := t.fsys.Open(path)
if err != nil {
t.errorf("%s: Open: %w", path, err)
return
}
info, err := file.Stat()
file.Close()
if err != nil {
t.errorf("%s: Stat: %w", path, err)
return
}
fentry := formatEntry(entry)
fientry := formatInfoEntry(info)
// Note: mismatch here is OK for symlink, because Open dereferences symlink.
if fentry != fientry && entry.Type()&fs.ModeSymlink == 0 {
t.errorf("%s: mismatch:\n\tentry = %s\n\tfile.Stat() = %s", path, fentry, fientry)
}
einfo, err := entry.Info()
if err != nil {
t.errorf("%s: entry.Info: %w", path, err)
return
}
finfo := formatInfo(info)
if entry.Type()&fs.ModeSymlink != 0 {
// For symlink, just check that entry.Info matches entry on common fields.
// Open dereferences symlink, so info itself may differ.
feentry := formatInfoEntry(einfo)
if fentry != feentry {
t.errorf("%s: mismatch\n\tentry = %s\n\tentry.Info() = %s\n", path, fentry, feentry)
}
} else {
feinfo := formatInfo(einfo)
if feinfo != finfo {
t.errorf("%s: mismatch:\n\tentry.Info() = %s\n\tfile.Stat() = %s\n", path, feinfo, finfo)
}
}
// Stat should be the same as Open+Stat, even for symlinks.
info2, err := fs.Stat(t.fsys, path)
if err != nil {
t.errorf("%s: fs.Stat: %w", path, err)
return
}
finfo2 := formatInfo(info2)
if finfo2 != finfo {
t.errorf("%s: fs.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
}
if fsys, ok := t.fsys.(fs.StatFS); ok {
info2, err := fsys.Stat(path)
if err != nil {
t.errorf("%s: fsys.Stat: %w", path, err)
return
}
finfo2 := formatInfo(info2)
if finfo2 != finfo {
t.errorf("%s: fsys.Stat(...) = %s\n\twant %s", path, finfo2, finfo)
}
}
}
// checkDirList checks that two directory lists contain the same files and file info.
// The order of the lists need not match.
func (t *fsTester) checkDirList(dir, desc string, list1, list2 []fs.DirEntry) {
old := make(map[string]fs.DirEntry)
checkMode := func(entry fs.DirEntry) {
if entry.IsDir() != (entry.Type()&fs.ModeDir != 0) {
if entry.IsDir() {
t.errorf("%s: ReadDir returned %s with IsDir() = true, Type() & ModeDir = 0", dir, entry.Name())
} else {
t.errorf("%s: ReadDir returned %s with IsDir() = false, Type() & ModeDir = ModeDir", dir, entry.Name())
}
}
}
for _, entry1 := range list1 {
old[entry1.Name()] = entry1
checkMode(entry1)
}
var diffs []string
for _, entry2 := range list2 {
entry1 := old[entry2.Name()]
if entry1 == nil {
checkMode(entry2)
diffs = append(diffs, "+ "+formatEntry(entry2))
continue
}
if formatEntry(entry1) != formatEntry(entry2) {
diffs = append(diffs, "- "+formatEntry(entry1), "+ "+formatEntry(entry2))
}
delete(old, entry2.Name())
}
for _, entry1 := range old {
diffs = append(diffs, "- "+formatEntry(entry1))
}
if len(diffs) == 0 {
return
}
slices.SortFunc(diffs, func(a, b string) int {
fa := strings.Fields(a)
fb := strings.Fields(b)
// sort by name (i < j) and then +/- (j < i, because + < -)
return strings.Compare(fa[1]+" "+fb[0], fb[1]+" "+fa[0])
})
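// For example, if entry "a" changed from a directory to a regular file and
// entry "b" disappeared between list1 and list2, the sorted diff reported
// below reads roughly:
//
//	- a IsDir=true Type=d---------
//	+ a IsDir=false Type=----------
//	- b IsDir=false Type=----------
//
// (Hypothetical entries, shown only to illustrate the output format.)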
t.errorf("%s: diff %s:\n\t%s", dir, desc, strings.Join(diffs, "\n\t"))
}
// checkFile checks that basic file reading works correctly.
func (t *fsTester) checkFile(file string) {
t.files = append(t.files, file)
// Read entire file.
f, err := t.fsys.Open(file)
if err != nil {
t.errorf("%s: Open: %w", file, err)
return
}
data, err := io.ReadAll(f)
if err != nil {
f.Close()
t.errorf("%s: Open+ReadAll: %w", file, err)
return
}
if err := f.Close(); err != nil {
t.errorf("%s: Close: %w", file, err)
}
// Check that closing twice doesn't crash.
// The return value doesn't matter.
f.Close()
// Check that ReadFile works if present.
if fsys, ok := t.fsys.(fs.ReadFileFS); ok {
data2, err := fsys.ReadFile(file)
if err != nil {
t.errorf("%s: fsys.ReadFile: %w", file, err)
return
}
t.checkFileRead(file, "ReadAll vs fsys.ReadFile", data, data2)
// Modify the data and check it again. Modifying the
// returned byte slice should not affect the next call.
for i := range data2 {
data2[i]++
}
data2, err = fsys.ReadFile(file)
if err != nil {
t.errorf("%s: second call to fsys.ReadFile: %w", file, err)
return
}
t.checkFileRead(file, "Readall vs second fsys.ReadFile", data, data2)
t.checkBadPath(file, "ReadFile",
func(name string) error { _, err := fsys.ReadFile(name); return err })
}
// Check that fs.ReadFile works with t.fsys.
data2, err := fs.ReadFile(t.fsys, file)
if err != nil {
t.errorf("%s: fs.ReadFile: %w", file, err)
return
}
t.checkFileRead(file, "ReadAll vs fs.ReadFile", data, data2)
// Use iotest.TestReader to check small reads, Seek, ReadAt.
f, err = t.fsys.Open(file)
if err != nil {
t.errorf("%s: second Open: %w", file, err)
return
}
defer f.Close()
if err := iotest.TestReader(f, data); err != nil {
t.errorf("%s: failed TestReader:\n\t%s", file, strings.ReplaceAll(err.Error(), "\n", "\n\t"))
}
}
func (t *fsTester) checkFileRead(file, desc string, data1, data2 []byte) {
if string(data1) != string(data2) {
t.errorf("%s: %s: different data returned\n\t%q\n\t%q", file, desc, data1, data2)
return
}
}
// checkOpen checks that various invalid forms of file's name cannot be opened using t.fsys.Open.
func (t *fsTester) checkOpen(file string) {
t.checkBadPath(file, "Open", func(file string) error {
f, err := t.fsys.Open(file)
if err == nil {
f.Close()
}
return err
})
}
// checkBadPath checks that various invalid forms of file's name cannot be opened using open.
func (t *fsTester) checkBadPath(file string, desc string, open func(string) error) {
bad := []string{
"/" + file,
file + "/.",
}
if file == "." {
bad = append(bad, "/")
}
if i := strings.Index(file, "/"); i >= 0 {
bad = append(bad,
file[:i]+"//"+file[i+1:],
file[:i]+"/./"+file[i+1:],
file[:i]+`\`+file[i+1:],
file[:i]+"/../"+file,
)
}
if i := strings.LastIndex(file, "/"); i >= 0 {
bad = append(bad,
file[:i]+"//"+file[i+1:],
file[:i]+"/./"+file[i+1:],
file[:i]+`\`+file[i+1:],
file+"/../"+file[i+1:],
)
}
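// For example, for file = "dir/file" the variants include "/dir/file",
// "dir/file/.", "dir//file", "dir/./file", `dir\file`, "dir/../dir/file",
// and "dir/file/../file". None of these is the canonical io/fs name of an
// existing file, so a conforming FS must reject every one of them.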
for _, b := range bad {
if err := open(b); err == nil {
t.errorf("%s: %s(%s) succeeded, want error", file, desc, b)
}
}
}

View File

@@ -0,0 +1,117 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fstest
import (
"errors"
"internal/testenv"
"io/fs"
"os"
"path/filepath"
"slices"
"strings"
"testing"
)
func TestSymlink(t *testing.T) {
testenv.MustHaveSymlink(t)
tmp := t.TempDir()
tmpfs := os.DirFS(tmp)
if err := os.WriteFile(filepath.Join(tmp, "hello"), []byte("hello, world\n"), 0644); err != nil {
t.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmp, "hello"), filepath.Join(tmp, "hello.link")); err != nil {
t.Fatal(err)
}
if err := TestFS(tmpfs, "hello", "hello.link"); err != nil {
t.Fatal(err)
}
}
func TestDash(t *testing.T) {
m := MapFS{
"a-b/a": {Data: []byte("a-b/a")},
}
if err := TestFS(m, "a-b/a"); err != nil {
t.Error(err)
}
}
type shuffledFS MapFS
func (fsys shuffledFS) Open(name string) (fs.File, error) {
f, err := MapFS(fsys).Open(name)
if err != nil {
return nil, err
}
return &shuffledFile{File: f}, nil
}
type shuffledFile struct{ fs.File }
func (f *shuffledFile) ReadDir(n int) ([]fs.DirEntry, error) {
dirents, err := f.File.(fs.ReadDirFile).ReadDir(n)
// Shuffle in a deterministic way; all we care about is making sure that the
// list of directory entries is not in lexicographic order.
//
// We do this to make sure that the TestFS test suite is not affected by the
// order of directory entries.
slices.SortFunc(dirents, func(a, b fs.DirEntry) int {
return strings.Compare(b.Name(), a.Name())
})
return dirents, err
}
func TestShuffledFS(t *testing.T) {
fsys := shuffledFS{
"tmp/one": {Data: []byte("1")},
"tmp/two": {Data: []byte("2")},
"tmp/three": {Data: []byte("3")},
}
if err := TestFS(fsys, "tmp/one", "tmp/two", "tmp/three"); err != nil {
t.Error(err)
}
}
// failPermFS is a filesystem that always fails with fs.ErrPermission.
type failPermFS struct{}
func (f failPermFS) Open(name string) (fs.File, error) {
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
}
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrPermission}
}
func TestTestFSWrappedErrors(t *testing.T) {
err := TestFS(failPermFS{})
if err == nil {
t.Fatal("error expected")
}
t.Logf("Error (expecting wrapped fs.ErrPermission):\n%v", err)
if !errors.Is(err, fs.ErrPermission) {
t.Errorf("error should be a wrapped ErrPermission: %#v", err)
}
// TestFS is expected to return a list of errors.
// Enforce that the list can be extracted for browsing.
var errs interface{ Unwrap() []error }
if !errors.As(err, &errs) {
t.Errorf("caller should be able to extract the errors as a list: %#v", err)
} else {
for _, err := range errs.Unwrap() {
// ErrPermission is expected
// but any other error must be reported.
if !errors.Is(err, fs.ErrPermission) {
t.Errorf("unexpected error: %v", err)
}
}
}
}

731
src/testing/fuzz.go Normal file
View File

@@ -0,0 +1,731 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"errors"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"time"
)
func initFuzzFlags() {
matchFuzz = flag.String("test.fuzz", "", "run the fuzz test matching `regexp`")
flag.Var(&fuzzDuration, "test.fuzztime", "time to spend fuzzing; default is to run indefinitely")
flag.Var(&minimizeDuration, "test.fuzzminimizetime", "time to spend minimizing a value after finding a failing input")
fuzzCacheDir = flag.String("test.fuzzcachedir", "", "directory where interesting fuzzing inputs are stored (for use only by cmd/go)")
isFuzzWorker = flag.Bool("test.fuzzworker", false, "coordinate with the parent process to fuzz random values (for use only by cmd/go)")
}
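// These flags are normally set indirectly through the go command. For example
// (FuzzReverse is a hypothetical fuzz test name),
//
//	go test -fuzz=FuzzReverse -fuzztime=30s
//
// is translated by cmd/go into -test.fuzz and -test.fuzztime for the test binary.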
var (
matchFuzz *string
fuzzDuration durationOrCountFlag
minimizeDuration = durationOrCountFlag{d: 60 * time.Second, allowZero: true}
fuzzCacheDir *string
isFuzzWorker *bool
// corpusDir is the parent directory of the fuzz test's seed corpus within
// the package.
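//
// A seed file under this directory (for example
// testdata/fuzz/FuzzReverse/seed-1, a hypothetical name) is plain text: a
// "go test fuzz v1" header followed by one Go-syntax value per fuzz argument,
// roughly:
//
//	go test fuzz v1
//	string("hello")
//	int(42)
//
// Encoding and decoding of these files is handled by internal/fuzz.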
corpusDir = "testdata/fuzz"
)
// fuzzWorkerExitCode is used as an exit code by fuzz worker processes after an
// internal error. This distinguishes internal errors from uncontrolled panics
// and other failures. Keep in sync with internal/fuzz.workerExitCode.
const fuzzWorkerExitCode = 70
// InternalFuzzTarget is an internal type but exported because it is
// cross-package; it is part of the implementation of the "go test" command.
type InternalFuzzTarget struct {
Name string
Fn func(f *F)
}
// F is a type passed to fuzz tests.
//
// Fuzz tests run generated inputs against a provided fuzz target, which can
// find and report potential bugs in the code being tested.
//
// A fuzz test runs the seed corpus by default, which includes entries provided
// by (*F).Add and entries in the testdata/fuzz/<FuzzTestName> directory. After
// any necessary setup and calls to (*F).Add, the fuzz test must then call
// (*F).Fuzz to provide the fuzz target. See the testing package documentation
// for an example, and see the [F.Fuzz] and [F.Add] method documentation for
// details.
//
// *F methods can only be called before (*F).Fuzz. Once the test is
// executing the fuzz target, only (*T) methods can be used. The only *F methods
// that are allowed in the (*F).Fuzz function are (*F).Failed and (*F).Name.
type F struct {
common
fuzzContext *fuzzContext
testContext *testContext
// inFuzzFn is true when the fuzz function is running. Most F methods cannot
// be called when inFuzzFn is true.
inFuzzFn bool
// corpus is a set of seed corpus entries, added with F.Add and loaded
// from testdata.
corpus []corpusEntry
result fuzzResult
fuzzCalled bool
}
var _ TB = (*F)(nil)
// corpusEntry is an alias to the same type as internal/fuzz.CorpusEntry.
// We use a type alias because we don't want to export this type, and we can't
// import internal/fuzz from testing.
type corpusEntry = struct {
Parent string
Path string
Data []byte
Values []any
Generation int
IsSeed bool
}
// Helper marks the calling function as a test helper function.
// When printing file and line information, that function will be skipped.
// Helper may be called simultaneously from multiple goroutines.
func (f *F) Helper() {
if f.inFuzzFn {
panic("testing: f.Helper was called inside the fuzz target, use t.Helper instead")
}
// common.Helper is inlined here.
// If we called it, it would mark F.Helper as the helper
// instead of the caller.
f.mu.Lock()
defer f.mu.Unlock()
if f.helperPCs == nil {
f.helperPCs = make(map[uintptr]struct{})
}
// repeating code from callerName here to save walking a stack frame
var pc [1]uintptr
n := runtime.Callers(2, pc[:]) // skip runtime.Callers + Helper
if n == 0 {
panic("testing: zero callers found")
}
if _, found := f.helperPCs[pc[0]]; !found {
f.helperPCs[pc[0]] = struct{}{}
f.helperNames = nil // map will be recreated next time it is needed
}
}
// Fail marks the function as having failed but continues execution.
func (f *F) Fail() {
// (*F).Fail may be called by (*T).Fail, which we should allow. However, we
// shouldn't allow direct (*F).Fail calls from inside the (*F).Fuzz function.
if f.inFuzzFn {
panic("testing: f.Fail was called inside the fuzz target, use t.Fail instead")
}
f.common.Helper()
f.common.Fail()
}
// Skipped reports whether the test was skipped.
func (f *F) Skipped() bool {
// (*F).Skipped may be called by tRunner, which we should allow. However, we
// shouldn't allow direct (*F).Skipped calls from inside the (*F).Fuzz function.
if f.inFuzzFn {
panic("testing: f.Skipped was called inside the fuzz target, use t.Skipped instead")
}
f.common.Helper()
return f.common.Skipped()
}
// Add will add the arguments to the seed corpus for the fuzz test. This will be
// a no-op if called after or within the fuzz target, and args must match the
// arguments for the fuzz target.
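//
// For illustration (FuzzReverse and Reverse are hypothetical names, not part
// of this package), a fuzz test typically seeds the corpus and then calls
// Fuzz:
//
//	func FuzzReverse(f *testing.F) {
//		f.Add("hello")
//		f.Fuzz(func(t *testing.T, s string) {
//			if got := Reverse(Reverse(s)); got != s {
//				t.Errorf("Reverse(Reverse(%q)) = %q, want %q", s, got, s)
//			}
//		})
//	}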
func (f *F) Add(args ...any) {
var values []any
for i := range args {
if t := reflect.TypeOf(args[i]); !supportedTypes[t] {
panic(fmt.Sprintf("testing: unsupported type to Add %v", t))
}
values = append(values, args[i])
}
f.corpus = append(f.corpus, corpusEntry{Values: values, IsSeed: true, Path: fmt.Sprintf("seed#%d", len(f.corpus))})
}
// supportedTypes represents all of the supported types which can be fuzzed.
var supportedTypes = map[reflect.Type]bool{
reflect.TypeOf(([]byte)("")): true,
reflect.TypeOf((string)("")): true,
reflect.TypeOf((bool)(false)): true,
reflect.TypeOf((byte)(0)): true,
reflect.TypeOf((rune)(0)): true,
reflect.TypeOf((float32)(0)): true,
reflect.TypeOf((float64)(0)): true,
reflect.TypeOf((int)(0)): true,
reflect.TypeOf((int8)(0)): true,
reflect.TypeOf((int16)(0)): true,
reflect.TypeOf((int32)(0)): true,
reflect.TypeOf((int64)(0)): true,
reflect.TypeOf((uint)(0)): true,
reflect.TypeOf((uint8)(0)): true,
reflect.TypeOf((uint16)(0)): true,
reflect.TypeOf((uint32)(0)): true,
reflect.TypeOf((uint64)(0)): true,
}
// Fuzz runs the fuzz function, ff, for fuzz testing. If ff fails for a set of
// arguments, those arguments will be added to the seed corpus.
//
// ff must be a function with no return value whose first argument is *T and
// whose remaining arguments are the types to be fuzzed.
// For example:
//
// f.Fuzz(func(t *testing.T, b []byte, i int) { ... })
//
// The following types are allowed: []byte, string, bool, byte, rune, float32,
// float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64.
// More types may be supported in the future.
//
// ff must not call any *F methods, e.g. (*F).Log, (*F).Error, (*F).Skip. Use
// the corresponding *T method instead. The only *F methods that are allowed in
// the (*F).Fuzz function are (*F).Failed and (*F).Name.
//
// This function should be fast and deterministic, and its behavior should not
// depend on shared state. No mutable input arguments, or pointers to them,
// should be retained between executions of the fuzz function, as the memory
// backing them may be mutated during a subsequent invocation. ff must not
// modify the underlying data of the arguments provided by the fuzzing engine.
//
// When fuzzing, F.Fuzz does not return until a problem is found, time runs out
// (set with -fuzztime), or the test process is interrupted by a signal. F.Fuzz
// should be called exactly once, unless F.Skip or [F.Fail] is called beforehand.
func (f *F) Fuzz(ff any) {
if f.fuzzCalled {
panic("testing: F.Fuzz called more than once")
}
f.fuzzCalled = true
if f.failed {
return
}
f.Helper()
// ff should be in the form func(*testing.T, ...interface{})
fn := reflect.ValueOf(ff)
fnType := fn.Type()
if fnType.Kind() != reflect.Func {
panic("testing: F.Fuzz must receive a function")
}
if fnType.NumIn() < 2 || fnType.In(0) != reflect.TypeOf((*T)(nil)) {
panic("testing: fuzz target must receive at least two arguments, where the first argument is a *T")
}
if fnType.NumOut() != 0 {
panic("testing: fuzz target must not return a value")
}
// Save the types of the function to compare against the corpus.
var types []reflect.Type
for i := 1; i < fnType.NumIn(); i++ {
t := fnType.In(i)
if !supportedTypes[t] {
panic(fmt.Sprintf("testing: unsupported type for fuzzing %v", t))
}
types = append(types, t)
}
// Load the testdata seed corpus. Check types of entries in the testdata
// corpus and entries declared with F.Add.
//
// Don't load the seed corpus if this is a worker process; we won't use it.
if f.fuzzContext.mode != fuzzWorker {
for _, c := range f.corpus {
if err := f.fuzzContext.deps.CheckCorpus(c.Values, types); err != nil {
// TODO(#48302): Report the source location of the F.Add call.
f.Fatal(err)
}
}
// Load seed corpus
c, err := f.fuzzContext.deps.ReadCorpus(filepath.Join(corpusDir, f.name), types)
if err != nil {
f.Fatal(err)
}
for i := range c {
c[i].IsSeed = true // these are all seed corpus values
if f.fuzzContext.mode == fuzzCoordinator {
// If this is the coordinator process, zero the values, since we don't need
// to hold onto them.
c[i].Values = nil
}
}
f.corpus = append(f.corpus, c...)
}
// run calls fn on a given input, as a subtest with its own T.
// run is analogous to T.Run. The test filtering and cleanup works similarly.
// fn is called in its own goroutine.
run := func(captureOut io.Writer, e corpusEntry) (ok bool) {
if e.Values == nil {
// The corpusEntry must have non-nil Values in order to run the
// test. If Values is nil, it is a bug in our code.
panic(fmt.Sprintf("corpus file %q was not unmarshaled", e.Path))
}
if shouldFailFast() {
return true
}
testName := f.name
if e.Path != "" {
testName = fmt.Sprintf("%s/%s", testName, filepath.Base(e.Path))
}
if f.testContext.isFuzzing {
// Don't preserve subtest names while fuzzing. If fn calls T.Run,
// there will be a very large number of subtests with duplicate names,
// which will use a large amount of memory. The subtest names aren't
// useful since there's no way to re-run them deterministically.
f.testContext.match.clearSubNames()
}
// Record the stack trace at the point of this call so that if the subtest
// function - which runs in a separate stack - is marked as a helper, we can
// continue walking the stack into the parent test.
var pc [maxStackLen]uintptr
n := runtime.Callers(2, pc[:])
t := &T{
common: common{
barrier: make(chan bool),
signal: make(chan bool),
name: testName,
parent: &f.common,
level: f.level + 1,
creator: pc[:n],
chatty: f.chatty,
},
context: f.testContext,
}
if captureOut != nil {
// t.parent aliases f.common.
t.parent.w = captureOut
}
t.w = indenter{&t.common}
if t.chatty != nil {
t.chatty.Updatef(t.name, "=== RUN %s\n", t.name)
}
f.common.inFuzzFn, f.inFuzzFn = true, true
go tRunner(t, func(t *T) {
args := []reflect.Value{reflect.ValueOf(t)}
for _, v := range e.Values {
args = append(args, reflect.ValueOf(v))
}
// Before resetting the current coverage, defer the snapshot so that
// we make sure it is called right before the tRunner function
// exits, regardless of whether it was executed cleanly, panicked,
// or if the fuzzFn called t.Fatal.
if f.testContext.isFuzzing {
defer f.fuzzContext.deps.SnapshotCoverage()
f.fuzzContext.deps.ResetCoverage()
}
fn.Call(args)
})
<-t.signal
if t.chatty != nil && t.chatty.json {
t.chatty.Updatef(t.parent.name, "=== NAME %s\n", t.parent.name)
}
f.common.inFuzzFn, f.inFuzzFn = false, false
return !t.Failed()
}
switch f.fuzzContext.mode {
case fuzzCoordinator:
// Fuzzing is enabled, and this is the test process started by 'go test'.
// Act as the coordinator process, and coordinate workers to perform the
// actual fuzzing.
corpusTargetDir := filepath.Join(corpusDir, f.name)
cacheTargetDir := filepath.Join(*fuzzCacheDir, f.name)
err := f.fuzzContext.deps.CoordinateFuzzing(
fuzzDuration.d,
int64(fuzzDuration.n),
minimizeDuration.d,
int64(minimizeDuration.n),
*parallel,
f.corpus,
types,
corpusTargetDir,
cacheTargetDir)
if err != nil {
f.result = fuzzResult{Error: err}
f.Fail()
fmt.Fprintf(f.w, "%v\n", err)
if crashErr, ok := err.(fuzzCrashError); ok {
crashPath := crashErr.CrashPath()
fmt.Fprintf(f.w, "Failing input written to %s\n", crashPath)
testName := filepath.Base(crashPath)
fmt.Fprintf(f.w, "To re-run:\ngo test -run=%s/%s\n", f.name, testName)
}
}
// TODO(jayconrod,katiehockman): Aggregate statistics across workers
// and add to FuzzResult (ie. time taken, num iterations)
case fuzzWorker:
// Fuzzing is enabled, and this is a worker process. Follow instructions
// from the coordinator.
if err := f.fuzzContext.deps.RunFuzzWorker(func(e corpusEntry) error {
// Don't write to f.w (which points to Stdout) if running from a
// fuzz worker. This would become very verbose, particularly during
// minimization. Return the error instead, and let the caller deal
// with the output.
var buf strings.Builder
if ok := run(&buf, e); !ok {
return errors.New(buf.String())
}
return nil
}); err != nil {
// Internal errors are marked with f.Fail; user code may call this too, before F.Fuzz.
// The worker will exit with fuzzWorkerExitCode, indicating this is a failure
// (and 'go test' should exit non-zero) but a failing input should not be recorded.
f.Errorf("communicating with fuzzing coordinator: %v", err)
}
default:
// Fuzzing is not enabled, or will be done later. Only run the seed
// corpus now.
for _, e := range f.corpus {
name := fmt.Sprintf("%s/%s", f.name, filepath.Base(e.Path))
if _, ok, _ := f.testContext.match.fullName(nil, name); ok {
run(f.w, e)
}
}
}
}
func (f *F) report() {
if *isFuzzWorker || f.parent == nil {
return
}
dstr := fmtDuration(f.duration)
format := "--- %s: %s (%s)\n"
if f.Failed() {
f.flushToParent(f.name, format, "FAIL", f.name, dstr)
} else if f.chatty != nil {
if f.Skipped() {
f.flushToParent(f.name, format, "SKIP", f.name, dstr)
} else {
f.flushToParent(f.name, format, "PASS", f.name, dstr)
}
}
}
// fuzzResult contains the results of a fuzz run.
type fuzzResult struct {
N int // The number of iterations.
T time.Duration // The total time taken.
Error error // Error is the error from the failing input
}
func (r fuzzResult) String() string {
if r.Error == nil {
return ""
}
return r.Error.Error()
}
// fuzzCrashError is satisfied by a failing input detected while fuzzing.
// These errors are written to the seed corpus and can be re-run with 'go test'.
// Errors within the fuzzing framework (like I/O errors between coordinator
// and worker processes) don't satisfy this interface.
type fuzzCrashError interface {
error
Unwrap() error
// CrashPath returns the path of the subtest that corresponds to the saved
// crash input file in the seed corpus. The test can be re-run with go test
// -run=$test/$name, where $test is the fuzz test name and $name is the
// filepath.Base of the string returned here.
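// For example (hypothetical names), a failing input saved as
// testdata/fuzz/FuzzReverse/582528ddfad69eb5 could be replayed with:
//
//	go test -run=FuzzReverse/582528ddfad69eb5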
CrashPath() string
}
// fuzzContext holds fields common to all fuzz tests.
type fuzzContext struct {
deps testDeps
mode fuzzMode
}
type fuzzMode uint8
const (
seedCorpusOnly fuzzMode = iota
fuzzCoordinator
fuzzWorker
)
// runFuzzTests runs the fuzz tests matching the pattern for -run. This will
// only run the (*F).Fuzz function for each seed corpus without using the
// fuzzing engine to generate or mutate inputs.
func runFuzzTests(deps testDeps, fuzzTests []InternalFuzzTarget, deadline time.Time) (ran, ok bool) {
ok = true
if len(fuzzTests) == 0 || *isFuzzWorker {
return ran, ok
}
m := newMatcher(deps.MatchString, *match, "-test.run", *skip)
var mFuzz *matcher
if *matchFuzz != "" {
mFuzz = newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
}
for _, procs := range cpuList {
runtime.GOMAXPROCS(procs)
for i := uint(0); i < *count; i++ {
if shouldFailFast() {
break
}
tctx := newTestContext(*parallel, m)
tctx.deadline = deadline
fctx := &fuzzContext{deps: deps, mode: seedCorpusOnly}
root := common{w: os.Stdout} // gather output in one place
if Verbose() {
root.chatty = newChattyPrinter(root.w)
}
for _, ft := range fuzzTests {
if shouldFailFast() {
break
}
testName, matched, _ := tctx.match.fullName(nil, ft.Name)
if !matched {
continue
}
if mFuzz != nil {
if _, fuzzMatched, _ := mFuzz.fullName(nil, ft.Name); fuzzMatched {
// If this will be fuzzed, then don't run the seed corpus
// right now. That will happen later.
continue
}
}
f := &F{
common: common{
signal: make(chan bool),
barrier: make(chan bool),
name: testName,
parent: &root,
level: root.level + 1,
chatty: root.chatty,
},
testContext: tctx,
fuzzContext: fctx,
}
f.w = indenter{&f.common}
if f.chatty != nil {
f.chatty.Updatef(f.name, "=== RUN %s\n", f.name)
}
go fRunner(f, ft.Fn)
<-f.signal
if f.chatty != nil && f.chatty.json {
f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name)
}
ok = ok && !f.Failed()
ran = ran || f.ran
}
if !ran {
// There were no tests to run on this iteration.
// This won't change, so no reason to keep trying.
break
}
}
}
return ran, ok
}
// runFuzzing runs the fuzz test matching the pattern for -fuzz. Exactly one
// fuzz test must match. This will run the fuzzing engine to generate and
// mutate new inputs against the fuzz target.
//
// If fuzzing is disabled (-test.fuzz is not set), runFuzzing
// returns immediately.
func runFuzzing(deps testDeps, fuzzTests []InternalFuzzTarget) (ok bool) {
if len(fuzzTests) == 0 || *matchFuzz == "" {
return true
}
m := newMatcher(deps.MatchString, *matchFuzz, "-test.fuzz", *skip)
tctx := newTestContext(1, m)
tctx.isFuzzing = true
fctx := &fuzzContext{
deps: deps,
}
root := common{w: os.Stdout}
if *isFuzzWorker {
root.w = io.Discard
fctx.mode = fuzzWorker
} else {
fctx.mode = fuzzCoordinator
}
if Verbose() && !*isFuzzWorker {
root.chatty = newChattyPrinter(root.w)
}
var fuzzTest *InternalFuzzTarget
var testName string
var matched []string
for i := range fuzzTests {
name, ok, _ := tctx.match.fullName(nil, fuzzTests[i].Name)
if !ok {
continue
}
matched = append(matched, name)
fuzzTest = &fuzzTests[i]
testName = name
}
if len(matched) == 0 {
fmt.Fprintln(os.Stderr, "testing: warning: no fuzz tests to fuzz")
return true
}
if len(matched) > 1 {
fmt.Fprintf(os.Stderr, "testing: will not fuzz, -fuzz matches more than one fuzz test: %v\n", matched)
return false
}
f := &F{
common: common{
signal: make(chan bool),
barrier: nil, // T.Parallel has no effect when fuzzing.
name: testName,
parent: &root,
level: root.level + 1,
chatty: root.chatty,
},
fuzzContext: fctx,
testContext: tctx,
}
f.w = indenter{&f.common}
if f.chatty != nil {
f.chatty.Updatef(f.name, "=== RUN %s\n", f.name)
}
go fRunner(f, fuzzTest.Fn)
<-f.signal
if f.chatty != nil {
f.chatty.Updatef(f.parent.name, "=== NAME %s\n", f.parent.name)
}
return !f.failed
}
// fRunner wraps a call to a fuzz test and ensures that cleanup functions are
// called and status flags are set. fRunner should be called in its own
// goroutine. To wait for its completion, receive from f.signal.
//
// fRunner is analogous to tRunner, which wraps subtests started with T.Run.
// Unit tests and fuzz tests work a little differently, so for now, these
// functions aren't consolidated. In particular, because there are no F.Run and
// F.Parallel methods, i.e., no fuzz sub-tests or parallel fuzz tests, a few
// simplifications are made. We also require that F.Fuzz, F.Skip, or F.Fail is
// called.
func fRunner(f *F, fn func(*F)) {
// When this goroutine is done, either because runtime.Goexit was called, a
// panic started, or fn returned normally, record the duration and send
// t.signal, indicating the fuzz test is done.
defer func() {
// Detect whether the fuzz test panicked or called runtime.Goexit
// without calling F.Fuzz, F.Fail, or F.Skip. If it did, panic (possibly
// replacing a nil panic value). Nothing should recover after fRunner
// unwinds, so this should crash the process and print stack.
// Unfortunately, recovering here adds stack frames, but the location of
// the original panic should still be clear.
f.checkRaces()
if f.Failed() {
numFailed.Add(1)
}
err := recover()
if err == nil {
f.mu.RLock()
fuzzNotCalled := !f.fuzzCalled && !f.skipped && !f.failed
if !f.finished && !f.skipped && !f.failed {
err = errNilPanicOrGoexit
}
f.mu.RUnlock()
if fuzzNotCalled && err == nil {
f.Error("returned without calling F.Fuzz, F.Fail, or F.Skip")
}
}
// Use a deferred call to ensure that we report that the test is
// complete even if a cleanup function calls F.FailNow. See issue 41355.
didPanic := false
defer func() {
if !didPanic {
// Only report that the test is complete if it doesn't panic,
// as otherwise the test binary can exit before the panic is
// reported to the user. See issue 41479.
f.signal <- true
}
}()
// If we recovered a panic or inappropriate runtime.Goexit, fail the test,
// flush the output log up to the root, then panic.
doPanic := func(err any) {
f.Fail()
if r := f.runCleanup(recoverAndReturnPanic); r != nil {
f.Logf("cleanup panicked with %v", r)
}
for root := &f.common; root.parent != nil; root = root.parent {
root.mu.Lock()
root.duration += highPrecisionTimeSince(root.start)
d := root.duration
root.mu.Unlock()
root.flushToParent(root.name, "--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
}
didPanic = true
panic(err)
}
if err != nil {
doPanic(err)
}
// No panic or inappropriate Goexit.
f.duration += highPrecisionTimeSince(f.start)
if len(f.sub) > 0 {
// Unblock inputs that called T.Parallel while running the seed corpus.
// This only affects fuzz tests run as normal tests.
// While fuzzing, T.Parallel has no effect, so f.sub is empty, and this
// branch is not taken. f.barrier is nil in that case.
f.testContext.release()
close(f.barrier)
// Wait for the subtests to complete.
for _, sub := range f.sub {
<-sub.signal
}
cleanupStart := highPrecisionTimeNow()
err := f.runCleanup(recoverAndReturnPanic)
f.duration += highPrecisionTimeSince(cleanupStart)
if err != nil {
doPanic(err)
}
}
// Report after all subtests have finished.
f.report()
f.done = true
f.setRan()
}()
defer func() {
if len(f.sub) == 0 {
f.runCleanup(normalPanic)
}
}()
f.start = highPrecisionTimeNow()
f.resetRaces()
fn(f)
// Code beyond this point will not be executed when FailNow or SkipNow
// is invoked.
f.mu.Lock()
f.finished = true
f.mu.Unlock()
}

116
src/testing/helper_test.go Normal file
View File

@@ -0,0 +1,116 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"internal/testenv"
"os"
"regexp"
"strings"
"testing"
)
func TestTBHelper(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
testTestHelper(t)
// Check that calling Helper from inside a top-level test function
// has no effect.
t.Helper()
t.Error("8")
return
}
testenv.MustHaveExec(t)
t.Parallel()
exe, err := os.Executable()
if err != nil {
t.Fatal(err)
}
cmd := testenv.Command(t, exe, "-test.run=^TestTBHelper$")
cmd = testenv.CleanCmdEnv(cmd)
cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
out, _ := cmd.CombinedOutput()
want := `--- FAIL: TestTBHelper \([^)]+\)
helperfuncs_test.go:15: 0
helperfuncs_test.go:47: 1
helperfuncs_test.go:24: 2
helperfuncs_test.go:49: 3
helperfuncs_test.go:56: 4
--- FAIL: TestTBHelper/sub \([^)]+\)
helperfuncs_test.go:59: 5
helperfuncs_test.go:24: 6
helperfuncs_test.go:58: 7
--- FAIL: TestTBHelper/sub2 \([^)]+\)
helperfuncs_test.go:80: 11
helperfuncs_test.go:84: recover 12
helperfuncs_test.go:86: GenericFloat64
helperfuncs_test.go:87: GenericInt
helper_test.go:22: 8
helperfuncs_test.go:73: 9
helperfuncs_test.go:69: 10
`
if !regexp.MustCompile(want).Match(out) {
t.Errorf("got output:\n\n%s\nwant matching:\n\n%s", out, want)
}
}
func TestTBHelperParallel(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
parallelTestHelper(t)
return
}
testenv.MustHaveExec(t)
t.Parallel()
exe, err := os.Executable()
if err != nil {
t.Fatal(err)
}
cmd := testenv.Command(t, exe, "-test.run=^TestTBHelperParallel$")
cmd = testenv.CleanCmdEnv(cmd)
cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
out, _ := cmd.CombinedOutput()
t.Logf("output:\n%s", out)
lines := strings.Split(strings.TrimSpace(string(out)), "\n")
// We expect to see one "--- FAIL" line at the start
// of the log, five lines of "parallel" logging,
// and a final "FAIL" line at the end of the test.
const wantLines = 7
if len(lines) != wantLines {
t.Fatalf("parallelTestHelper gave %d lines of output; want %d", len(lines), wantLines)
}
want := "helperfuncs_test.go:24: parallel"
if got := strings.TrimSpace(lines[1]); got != want {
t.Errorf("got second output line %q; want %q", got, want)
}
}
func BenchmarkTBHelper(b *testing.B) {
f1 := func() {
b.Helper()
}
f2 := func() {
b.Helper()
}
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
if i&1 == 0 {
f1()
} else {
f2()
}
}
}

View File

@@ -0,0 +1,124 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"sync"
"testing"
)
// The line numbering of this file is important for TestTBHelper.
func notHelper(t *testing.T, msg string) {
t.Error(msg)
}
func helper(t *testing.T, msg string) {
t.Helper()
t.Error(msg)
}
func notHelperCallingHelper(t *testing.T, msg string) {
helper(t, msg)
}
func helperCallingHelper(t *testing.T, msg string) {
t.Helper()
helper(t, msg)
}
func genericHelper[G any](t *testing.T, msg string) {
t.Helper()
t.Error(msg)
}
var genericIntHelper = genericHelper[int]
func testTestHelper(t *testing.T) {
testHelper(t)
}
func testHelper(t *testing.T) {
// Check combinations of directly and indirectly
// calling helper functions.
notHelper(t, "0")
helper(t, "1")
notHelperCallingHelper(t, "2")
helperCallingHelper(t, "3")
// Check a function literal closing over t that uses Helper.
fn := func(msg string) {
t.Helper()
t.Error(msg)
}
fn("4")
t.Run("sub", func(t *testing.T) {
helper(t, "5")
notHelperCallingHelper(t, "6")
// Check that calling Helper from inside a subtest entry function
// works as if it were in an ordinary function call.
t.Helper()
t.Error("7")
})
// Check that right caller is reported for func passed to Cleanup when
// multiple cleanup functions have been registered.
t.Cleanup(func() {
t.Helper()
t.Error("10")
})
t.Cleanup(func() {
t.Helper()
t.Error("9")
})
// Check that helper-ness propagates up through subtests
// to helpers above. See https://golang.org/issue/44887.
helperSubCallingHelper(t, "11")
// Check that helper-ness propagates up through panic/recover.
// See https://golang.org/issue/31154.
recoverHelper(t, "12")
genericHelper[float64](t, "GenericFloat64")
genericIntHelper(t, "GenericInt")
}
func parallelTestHelper(t *testing.T) {
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
notHelperCallingHelper(t, "parallel")
wg.Done()
}()
}
wg.Wait()
}
func helperSubCallingHelper(t *testing.T, msg string) {
t.Helper()
t.Run("sub2", func(t *testing.T) {
t.Helper()
t.Fatal(msg)
})
}
func recoverHelper(t *testing.T, msg string) {
t.Helper()
defer func() {
t.Helper()
if err := recover(); err != nil {
t.Errorf("recover %s", err)
}
}()
doPanic(t, msg)
}
func doPanic(t *testing.T, msg string) {
t.Helper()
panic(msg)
}

View File

@@ -0,0 +1,240 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package testdeps provides access to dependencies needed by test execution.
//
// This package is imported by the generated main package, which passes
// TestDeps into testing.Main. This allows tests to use packages at run time
// without making those packages direct dependencies of package testing.
// Direct dependencies of package testing are harder to write tests for.
package testdeps
import (
"bufio"
"context"
"internal/fuzz"
"internal/testlog"
"io"
"os"
"os/signal"
"reflect"
"regexp"
"runtime/pprof"
"strings"
"sync"
"time"
)
// Cover indicates whether coverage is enabled.
var Cover bool
// TestDeps is an implementation of the testing.testDeps interface,
// suitable for passing to [testing.MainStart].
type TestDeps struct{}
var matchPat string
var matchRe *regexp.Regexp
func (TestDeps) MatchString(pat, str string) (result bool, err error) {
if matchRe == nil || matchPat != pat {
matchPat = pat
matchRe, err = regexp.Compile(matchPat)
if err != nil {
return
}
}
return matchRe.MatchString(str), nil
}
func (TestDeps) StartCPUProfile(w io.Writer) error {
return pprof.StartCPUProfile(w)
}
func (TestDeps) StopCPUProfile() {
pprof.StopCPUProfile()
}
func (TestDeps) WriteProfileTo(name string, w io.Writer, debug int) error {
return pprof.Lookup(name).WriteTo(w, debug)
}
// ImportPath is the import path of the testing binary, set by the generated main function.
var ImportPath string
func (TestDeps) ImportPath() string {
return ImportPath
}
// testLog implements testlog.Interface, logging actions by package os.
type testLog struct {
mu sync.Mutex
w *bufio.Writer
set bool
}
func (l *testLog) Getenv(key string) {
l.add("getenv", key)
}
func (l *testLog) Open(name string) {
l.add("open", name)
}
func (l *testLog) Stat(name string) {
l.add("stat", name)
}
func (l *testLog) Chdir(name string) {
l.add("chdir", name)
}
// add adds the (op, name) pair to the test log.
func (l *testLog) add(op, name string) {
if strings.Contains(name, "\n") || name == "" {
return
}
l.mu.Lock()
defer l.mu.Unlock()
if l.w == nil {
return
}
l.w.WriteString(op)
l.w.WriteByte(' ')
l.w.WriteString(name)
l.w.WriteByte('\n')
}
var log testLog
func (TestDeps) StartTestLog(w io.Writer) {
log.mu.Lock()
log.w = bufio.NewWriter(w)
if !log.set {
// Tests that define TestMain and then run m.Run multiple times
// will call StartTestLog/StopTestLog multiple times.
// Checking log.set avoids calling testlog.SetLogger multiple times
// (which will panic) and also avoids writing the header multiple times.
log.set = true
testlog.SetLogger(&log)
log.w.WriteString("# test log\n") // known to cmd/go/internal/test/test.go
}
log.mu.Unlock()
}
func (TestDeps) StopTestLog() error {
log.mu.Lock()
defer log.mu.Unlock()
err := log.w.Flush()
log.w = nil
return err
}
// SetPanicOnExit0 tells the os package whether to panic on os.Exit(0).
func (TestDeps) SetPanicOnExit0(v bool) {
testlog.SetPanicOnExit0(v)
}
func (TestDeps) CoordinateFuzzing(
timeout time.Duration,
limit int64,
minimizeTimeout time.Duration,
minimizeLimit int64,
parallel int,
seed []fuzz.CorpusEntry,
types []reflect.Type,
corpusDir,
cacheDir string) (err error) {
// Fuzzing may be interrupted with a timeout or if the user presses ^C.
// In either case, we'll stop worker processes gracefully and save
// crashers and interesting values.
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel()
err = fuzz.CoordinateFuzzing(ctx, fuzz.CoordinateFuzzingOpts{
Log: os.Stderr,
Timeout: timeout,
Limit: limit,
MinimizeTimeout: minimizeTimeout,
MinimizeLimit: minimizeLimit,
Parallel: parallel,
Seed: seed,
Types: types,
CorpusDir: corpusDir,
CacheDir: cacheDir,
})
if err == ctx.Err() {
return nil
}
return err
}
func (TestDeps) RunFuzzWorker(fn func(fuzz.CorpusEntry) error) error {
// Worker processes may or may not receive a signal when the user presses ^C.
// On POSIX operating systems, a signal sent to a process group is delivered
// to all processes in that group. This is not the case on Windows.
// If the worker is interrupted, return quickly and without error.
// If only the coordinator process is interrupted, it tells each worker
// process to stop by closing its "fuzz_in" pipe.
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
defer cancel()
err := fuzz.RunFuzzWorker(ctx, fn)
if err == ctx.Err() {
return nil
}
return err
}
func (TestDeps) ReadCorpus(dir string, types []reflect.Type) ([]fuzz.CorpusEntry, error) {
return fuzz.ReadCorpus(dir, types)
}
func (TestDeps) CheckCorpus(vals []any, types []reflect.Type) error {
return fuzz.CheckCorpus(vals, types)
}
func (TestDeps) ResetCoverage() {
fuzz.ResetCoverage()
}
func (TestDeps) SnapshotCoverage() {
fuzz.SnapshotCoverage()
}
var CoverMode string
var Covered string
var CoverSelectedPackages []string
// The variables below are set at runtime (via code in testmain) to point
// to the equivalent functions in package internal/coverage/cfile; doing
// things this way allows us to have tests import internal/coverage/cfile
// only when -cover is in effect (as opposed to importing for all tests).
var (
CoverSnapshotFunc func() float64
CoverProcessTestDirFunc func(dir string, cfile string, cm string, cpkg string, w io.Writer, selpkgs []string) error
CoverMarkProfileEmittedFunc func(val bool)
)
func (TestDeps) InitRuntimeCoverage() (mode string, tearDown func(string, string) (string, error), snapcov func() float64) {
if CoverMode == "" {
return
}
return CoverMode, coverTearDown, CoverSnapshotFunc
}
func coverTearDown(coverprofile string, gocoverdir string) (string, error) {
var err error
if gocoverdir == "" {
gocoverdir, err = os.MkdirTemp("", "gocoverdir")
if err != nil {
return "error setting GOCOVERDIR: bad os.MkdirTemp return", err
}
defer os.RemoveAll(gocoverdir)
}
CoverMarkProfileEmittedFunc(true)
cmode := CoverMode
if err := CoverProcessTestDirFunc(gocoverdir, coverprofile, cmode, Covered, os.Stdout, CoverSelectedPackages); err != nil {
return "error generating coverage report", err
}
return "", nil
}

View File

@@ -0,0 +1,22 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest_test
import (
"errors"
"fmt"
"testing/iotest"
)
func ExampleErrReader() {
// A reader that always returns a custom error.
r := iotest.ErrReader(errors.New("custom error"))
n, err := r.Read(nil)
fmt.Printf("n: %d\nerr: %q\n", n, err)
// Output:
// n: 0
// err: "custom error"
}

View File

@@ -0,0 +1,54 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import (
"io"
"log"
)
type writeLogger struct {
prefix string
w io.Writer
}
func (l *writeLogger) Write(p []byte) (n int, err error) {
n, err = l.w.Write(p)
if err != nil {
log.Printf("%s %x: %v", l.prefix, p[0:n], err)
} else {
log.Printf("%s %x", l.prefix, p[0:n])
}
return
}
// NewWriteLogger returns a writer that behaves like w except
// that it logs (using [log.Printf]) each write to standard error,
// printing the prefix and the hexadecimal data written.
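//
// For example (illustrative values), wrapping io.Discard:
//
//	w := iotest.NewWriteLogger("sent:", io.Discard)
//	w.Write([]byte("hi")) // logs roughly: sent: 6869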
func NewWriteLogger(prefix string, w io.Writer) io.Writer {
return &writeLogger{prefix, w}
}
type readLogger struct {
prefix string
r io.Reader
}
func (l *readLogger) Read(p []byte) (n int, err error) {
n, err = l.r.Read(p)
if err != nil {
log.Printf("%s %x: %v", l.prefix, p[0:n], err)
} else {
log.Printf("%s %x", l.prefix, p[0:n])
}
return
}
// NewReadLogger returns a reader that behaves like r except
// that it logs (using [log.Printf]) each read to standard error,
// printing the prefix and the hexadecimal data read.
func NewReadLogger(prefix string, r io.Reader) io.Reader {
return &readLogger{prefix, r}
}

View File

@@ -0,0 +1,153 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import (
"bytes"
"errors"
"fmt"
"log"
"strings"
"testing"
)
type errWriter struct {
err error
}
func (w errWriter) Write([]byte) (int, error) {
return 0, w.err
}
func TestWriteLogger(t *testing.T) {
olw := log.Writer()
olf := log.Flags()
olp := log.Prefix()
// Revert the original log settings before we exit.
defer func() {
log.SetFlags(olf)
log.SetPrefix(olp)
log.SetOutput(olw)
}()
lOut := new(strings.Builder)
log.SetPrefix("lw: ")
log.SetOutput(lOut)
log.SetFlags(0)
lw := new(strings.Builder)
wl := NewWriteLogger("write:", lw)
if _, err := wl.Write([]byte("Hello, World!")); err != nil {
t.Fatalf("Unexpectedly failed to write: %v", err)
}
if g, w := lw.String(), "Hello, World!"; g != w {
t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
wantLogWithHex := fmt.Sprintf("lw: write: %x\n", "Hello, World!")
if g, w := lOut.String(), wantLogWithHex; g != w {
t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
}
func TestWriteLogger_errorOnWrite(t *testing.T) {
olw := log.Writer()
olf := log.Flags()
olp := log.Prefix()
// Revert the original log settings before we exit.
defer func() {
log.SetFlags(olf)
log.SetPrefix(olp)
log.SetOutput(olw)
}()
lOut := new(strings.Builder)
log.SetPrefix("lw: ")
log.SetOutput(lOut)
log.SetFlags(0)
lw := errWriter{err: errors.New("Write Error!")}
wl := NewWriteLogger("write:", lw)
if _, err := wl.Write([]byte("Hello, World!")); err == nil {
t.Fatalf("Unexpectedly succeeded to write: %v", err)
}
wantLogWithHex := fmt.Sprintf("lw: write: %x: %v\n", "", "Write Error!")
if g, w := lOut.String(), wantLogWithHex; g != w {
t.Errorf("WriteLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
}
func TestReadLogger(t *testing.T) {
olw := log.Writer()
olf := log.Flags()
olp := log.Prefix()
// Revert the original log settings before we exit.
defer func() {
log.SetFlags(olf)
log.SetPrefix(olp)
log.SetOutput(olw)
}()
lOut := new(strings.Builder)
log.SetPrefix("lr: ")
log.SetOutput(lOut)
log.SetFlags(0)
data := []byte("Hello, World!")
p := make([]byte, len(data))
lr := bytes.NewReader(data)
rl := NewReadLogger("read:", lr)
n, err := rl.Read(p)
if err != nil {
t.Fatalf("Unexpectedly failed to read: %v", err)
}
if g, w := p[:n], data; !bytes.Equal(g, w) {
t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
wantLogWithHex := fmt.Sprintf("lr: read: %x\n", "Hello, World!")
if g, w := lOut.String(), wantLogWithHex; g != w {
t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
}
func TestReadLogger_errorOnRead(t *testing.T) {
olw := log.Writer()
olf := log.Flags()
olp := log.Prefix()
// Revert the original log settings before we exit.
defer func() {
log.SetFlags(olf)
log.SetPrefix(olp)
log.SetOutput(olw)
}()
lOut := new(strings.Builder)
log.SetPrefix("lr: ")
log.SetOutput(lOut)
log.SetFlags(0)
data := []byte("Hello, World!")
p := make([]byte, len(data))
lr := ErrReader(errors.New("io failure"))
rl := NewReadLogger("read", lr)
n, err := rl.Read(p)
if err == nil {
t.Fatalf("Unexpectedly succeeded to read: %v", err)
}
wantLogWithHex := fmt.Sprintf("lr: read %x: io failure\n", p[:n])
if g, w := lOut.String(), wantLogWithHex; g != w {
t.Errorf("ReadLogger mismatch\n\tgot: %q\n\twant: %q", g, w)
}
}

View File

@@ -0,0 +1,268 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package iotest implements Readers and Writers useful mainly for testing.
package iotest
import (
"bytes"
"errors"
"fmt"
"io"
)
// OneByteReader returns a Reader that implements
// each non-empty Read by reading one byte from r.
func OneByteReader(r io.Reader) io.Reader { return &oneByteReader{r} }
type oneByteReader struct {
r io.Reader
}
func (r *oneByteReader) Read(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
return r.r.Read(p[0:1])
}
// HalfReader returns a Reader that implements Read
// by reading half as many requested bytes from r.
func HalfReader(r io.Reader) io.Reader { return &halfReader{r} }
type halfReader struct {
r io.Reader
}
func (r *halfReader) Read(p []byte) (int, error) {
return r.r.Read(p[0 : (len(p)+1)/2])
}
// DataErrReader changes the way errors are handled by a Reader. Normally, a
// Reader returns an error (typically EOF) from the first Read call after the
// last piece of data is read. DataErrReader wraps a Reader and changes its
// behavior so the final error is returned along with the final data, instead
// of in the first call after the final data.
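//
// For example, when wrapping strings.NewReader("abc") (an illustrative input),
// the final Read returns the remaining bytes of "abc" together with io.EOF,
// rather than a separate (0, io.EOF) result on a later call.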
func DataErrReader(r io.Reader) io.Reader { return &dataErrReader{r, nil, make([]byte, 1024)} }
type dataErrReader struct {
r io.Reader
unread []byte
data []byte
}
func (r *dataErrReader) Read(p []byte) (n int, err error) {
// loop because first call needs two reads:
// one to get data and a second to look for an error.
for {
if len(r.unread) == 0 {
n1, err1 := r.r.Read(r.data)
r.unread = r.data[0:n1]
err = err1
}
if n > 0 || err != nil {
break
}
n = copy(p, r.unread)
r.unread = r.unread[n:]
}
return
}
// ErrTimeout is a fake timeout error.
var ErrTimeout = errors.New("timeout")
// TimeoutReader returns [ErrTimeout] on the second read
// with no data. Subsequent calls to read succeed.
func TimeoutReader(r io.Reader) io.Reader { return &timeoutReader{r, 0} }
type timeoutReader struct {
r io.Reader
count int
}
func (r *timeoutReader) Read(p []byte) (int, error) {
r.count++
if r.count == 2 {
return 0, ErrTimeout
}
return r.r.Read(p)
}
// ErrReader returns an [io.Reader] that returns 0, err from all Read calls.
func ErrReader(err error) io.Reader {
return &errReader{err: err}
}
type errReader struct {
err error
}
func (r *errReader) Read(p []byte) (int, error) {
return 0, r.err
}
type smallByteReader struct {
r io.Reader
off int
n int
}
func (r *smallByteReader) Read(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
r.n = r.n%3 + 1
n := r.n
if n > len(p) {
n = len(p)
}
n, err := r.r.Read(p[0:n])
if err != nil && err != io.EOF {
err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
}
r.off += n
return n, err
}
// TestReader tests that reading from r returns the expected file content.
// It does reads of different sizes, until EOF.
// If r implements [io.ReaderAt] or [io.Seeker], TestReader also checks
// that those operations behave as they should.
//
// If TestReader finds any misbehaviors, it returns an error reporting them.
// The error text may span multiple lines.
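//
// A typical use in a test looks roughly like this (content is an illustrative
// value; bytes.NewReader also provides Seek and ReadAt, so all checks run):
//
//	content := []byte("hello, world")
//	if err := iotest.TestReader(bytes.NewReader(content), content); err != nil {
//		t.Fatal(err)
//	}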
func TestReader(r io.Reader, content []byte) error {
if len(content) > 0 {
n, err := r.Read(nil)
if n != 0 || err != nil {
return fmt.Errorf("Read(0) = %d, %v, want 0, nil", n, err)
}
}
data, err := io.ReadAll(&smallByteReader{r: r})
if err != nil {
return err
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAll(small amounts) = %q\n\twant %q", data, content)
}
n, err := r.Read(make([]byte, 10))
if n != 0 || err != io.EOF {
return fmt.Errorf("Read(10) at EOF = %v, %v, want 0, EOF", n, err)
}
if r, ok := r.(io.ReadSeeker); ok {
// Seek(0, 1) should report the current file position (EOF).
if off, err := r.Seek(0, 1); off != int64(len(content)) || err != nil {
return fmt.Errorf("Seek(0, 1) from EOF = %d, %v, want %d, nil", off, err, len(content))
}
// Seek backward partway through file, in two steps.
// If middle == 0, len(content) == 0, can't use the -1 and +1 seeks.
middle := len(content) - len(content)/3
if middle > 0 {
if off, err := r.Seek(-1, 1); off != int64(len(content)-1) || err != nil {
return fmt.Errorf("Seek(-1, 1) from EOF = %d, %v, want %d, nil", -off, err, len(content)-1)
}
if off, err := r.Seek(int64(-len(content)/3), 1); off != int64(middle-1) || err != nil {
return fmt.Errorf("Seek(%d, 1) from %d = %d, %v, want %d, nil", -len(content)/3, len(content)-1, off, err, middle-1)
}
if off, err := r.Seek(+1, 1); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(+1, 1) from %d = %d, %v, want %d, nil", middle-1, off, err, middle)
}
}
// Seek(0, 1) should report the current file position (middle).
if off, err := r.Seek(0, 1); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(0, 1) from %d = %d, %v, want %d, nil", middle, off, err, middle)
}
// Reading forward should return the last part of the file.
data, err := io.ReadAll(&smallByteReader{r: r})
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
}
if !bytes.Equal(data, content[middle:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
}
// Seek relative to end of file, but start elsewhere.
if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
}
if off, err := r.Seek(int64(-len(content)/3), 2); off != int64(middle) || err != nil {
return fmt.Errorf("Seek(%d, 2) from %d = %d, %v, want %d, nil", -len(content)/3, middle/2, off, err, middle)
}
// Reading forward should return the last part of the file (again).
data, err = io.ReadAll(&smallByteReader{r: r})
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle, err)
}
if !bytes.Equal(data, content[middle:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle, data, content[middle:])
}
// Absolute seek & read forward.
if off, err := r.Seek(int64(middle/2), 0); off != int64(middle/2) || err != nil {
return fmt.Errorf("Seek(%d, 0) from EOF = %d, %v, want %d, nil", middle/2, off, err, middle/2)
}
data, err = io.ReadAll(r)
if err != nil {
return fmt.Errorf("ReadAll from offset %d: %v", middle/2, err)
}
if !bytes.Equal(data, content[middle/2:]) {
return fmt.Errorf("ReadAll from offset %d = %q\n\twant %q", middle/2, data, content[middle/2:])
}
}
if r, ok := r.(io.ReaderAt); ok {
data := make([]byte, len(content), len(content)+1)
for i := range data {
data[i] = 0xfe
}
n, err := r.ReadAt(data, 0)
if n != len(data) || err != nil && err != io.EOF {
return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, nil or EOF", len(data), n, err, len(data))
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
}
n, err = r.ReadAt(data[:1], int64(len(data)))
if n != 0 || err != io.EOF {
return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 0, EOF", len(data), n, err)
}
for i := range data {
data[i] = 0xfe
}
n, err = r.ReadAt(data[:cap(data)], 0)
if n != len(data) || err != io.EOF {
return fmt.Errorf("ReadAt(%d, 0) = %v, %v, want %d, EOF", cap(data), n, err, len(data))
}
if !bytes.Equal(data, content) {
return fmt.Errorf("ReadAt(%d, 0) = %q\n\twant %q", len(data), data, content)
}
for i := range data {
data[i] = 0xfe
}
for i := range data {
n, err = r.ReadAt(data[i:i+1], int64(i))
if n != 1 || err != nil && (i != len(data)-1 || err != io.EOF) {
want := "nil"
if i == len(data)-1 {
want = "nil or EOF"
}
return fmt.Errorf("ReadAt(1, %d) = %v, %v, want 1, %s", i, n, err, want)
}
if data[i] != content[i] {
return fmt.Errorf("ReadAt(1, %d) = %q want %q", i, data[i:i+1], content[i:i+1])
}
}
}
return nil
}

View File

@@ -0,0 +1,261 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import (
"bytes"
"errors"
"io"
"strings"
"testing"
)
func TestOneByteReader_nonEmptyReader(t *testing.T) {
msg := "Hello, World!"
buf := new(bytes.Buffer)
buf.WriteString(msg)
obr := OneByteReader(buf)
var b []byte
n, err := obr.Read(b)
if err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
b = make([]byte, 3)
// Read from obr until EOF.
got := new(strings.Builder)
for i := 0; ; i++ {
n, err = obr.Read(b)
if err != nil {
break
}
if g, w := n, 1; g != w {
t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
}
got.Write(b[:n])
}
if g, w := err, io.EOF; g != w {
t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := got.String(), "Hello, World!"; g != w {
t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
}
}
func TestOneByteReader_emptyReader(t *testing.T) {
r := new(bytes.Buffer)
obr := OneByteReader(r)
var b []byte
if n, err := obr.Read(b); err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
b = make([]byte, 5)
n, err := obr.Read(b)
if g, w := err, io.EOF; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
}
func TestHalfReader_nonEmptyReader(t *testing.T) {
msg := "Hello, World!"
buf := new(bytes.Buffer)
buf.WriteString(msg)
// empty read buffer
hr := HalfReader(buf)
var b []byte
n, err := hr.Read(b)
if err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
// non empty read buffer
b = make([]byte, 2)
got := new(strings.Builder)
for i := 0; ; i++ {
n, err = hr.Read(b)
if err != nil {
break
}
if g, w := n, 1; g != w {
t.Errorf("Iteration #%d read %d bytes, want %d", i, g, w)
}
got.Write(b[:n])
}
if g, w := err, io.EOF; g != w {
t.Errorf("Unexpected error after reading all bytes\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := got.String(), "Hello, World!"; g != w {
t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
}
}
func TestHalfReader_emptyReader(t *testing.T) {
r := new(bytes.Buffer)
hr := HalfReader(r)
var b []byte
if n, err := hr.Read(b); err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
b = make([]byte, 5)
n, err := hr.Read(b)
if g, w := err, io.EOF; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
}
func TestTimeOutReader_nonEmptyReader(t *testing.T) {
msg := "Hello, World!"
buf := new(bytes.Buffer)
buf.WriteString(msg)
// empty read buffer
tor := TimeoutReader(buf)
var b []byte
n, err := tor.Read(b)
if err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
// Second call should timeout
n, err = tor.Read(b)
if g, w := err, ErrTimeout; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
// non empty read buffer
tor2 := TimeoutReader(buf)
b = make([]byte, 3)
if n, err := tor2.Read(b); err != nil || n == 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
// Second call should timeout
n, err = tor2.Read(b)
if g, w := err, ErrTimeout; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
}
func TestTimeOutReader_emptyReader(t *testing.T) {
r := new(bytes.Buffer)
// empty read buffer
tor := TimeoutReader(r)
var b []byte
if n, err := tor.Read(b); err != nil || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
// Second call should timeout
n, err := tor.Read(b)
if g, w := err, ErrTimeout; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
// non empty read buffer
tor2 := TimeoutReader(r)
b = make([]byte, 5)
if n, err := tor2.Read(b); err != io.EOF || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
// Second call should timeout
n, err = tor2.Read(b)
if g, w := err, ErrTimeout; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
}
func TestDataErrReader_nonEmptyReader(t *testing.T) {
msg := "Hello, World!"
buf := new(bytes.Buffer)
buf.WriteString(msg)
der := DataErrReader(buf)
b := make([]byte, 3)
got := new(strings.Builder)
var n int
var err error
for {
n, err = der.Read(b)
got.Write(b[:n])
if err != nil {
break
}
}
if err != io.EOF || n == 0 {
t.Errorf("Last Read returned n=%d err=%v", n, err)
}
if g, w := got.String(), "Hello, World!"; g != w {
t.Errorf("Read mismatch\n\tGot: %q\n\tWant: %q", g, w)
}
}
func TestDataErrReader_emptyReader(t *testing.T) {
r := new(bytes.Buffer)
der := DataErrReader(r)
var b []byte
if n, err := der.Read(b); err != io.EOF || n != 0 {
t.Errorf("Empty buffer read returned n=%d err=%v", n, err)
}
b = make([]byte, 5)
n, err := der.Read(b)
if g, w := err, io.EOF; g != w {
t.Errorf("Error mismatch\n\tGot: %v\n\tWant: %v", g, w)
}
if g, w := n, 0; g != w {
t.Errorf("Unexpectedly read %d bytes, wanted %d", g, w)
}
}
func TestErrReader(t *testing.T) {
cases := []struct {
name string
err error
}{
{"nil error", nil},
{"non-nil error", errors.New("io failure")},
{"io.EOF", io.EOF},
}
for _, tt := range cases {
tt := tt
t.Run(tt.name, func(t *testing.T) {
n, err := ErrReader(tt.err).Read(nil)
if err != tt.err {
t.Fatalf("Error mismatch\nGot: %v\nWant: %v", err, tt.err)
}
if n != 0 {
t.Fatalf("Byte count mismatch: got %d want 0", n)
}
})
}
}
func TestStringsReader(t *testing.T) {
const msg = "Now is the time for all good gophers."
r := strings.NewReader(msg)
if err := TestReader(r, []byte(msg)); err != nil {
t.Fatal(err)
}
}

View File

@@ -0,0 +1,35 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import "io"
// TruncateWriter returns a Writer that writes to w
// but stops silently after n bytes.
func TruncateWriter(w io.Writer, n int64) io.Writer {
return &truncateWriter{w, n}
}
type truncateWriter struct {
w io.Writer
n int64
}
func (t *truncateWriter) Write(p []byte) (n int, err error) {
if t.n <= 0 {
return len(p), nil
}
// real write
n = len(p)
if int64(n) > t.n {
n = int(t.n)
}
n, err = t.w.Write(p[0:n])
t.n -= int64(n)
if err == nil {
n = len(p)
}
return
}
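A minimal usage sketch, separate from the file above (the buffer and limit are invented for illustration): only the first n bytes reach the underlying writer, yet the full length is reported as written.

package main

import (
	"fmt"
	"strings"
	"testing/iotest"
)

func main() {
	var buf strings.Builder
	tw := iotest.TruncateWriter(&buf, 3)
	// Reports all 5 bytes as written but forwards only "abc" to buf.
	n, err := tw.Write([]byte("abcde"))
	fmt.Println(n, err, buf.String()) // 5 <nil> abc
}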

View File

@@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package iotest
import (
"strings"
"testing"
)
var truncateWriterTests = []struct {
in string
want string
trunc int64
n int
}{
{"hello", "", -1, 5},
{"world", "", 0, 5},
{"abcde", "abc", 3, 5},
{"edcba", "edcba", 7, 5},
}
func TestTruncateWriter(t *testing.T) {
for _, tt := range truncateWriterTests {
buf := new(strings.Builder)
tw := TruncateWriter(buf, tt.trunc)
n, err := tw.Write([]byte(tt.in))
if err != nil {
t.Errorf("Unexpected error %v for\n\t%+v", err, tt)
}
if g, w := buf.String(), tt.want; g != w {
t.Errorf("got %q, expected %q", g, w)
}
if g, w := n, tt.n; g != w {
t.Errorf("read %d bytes, but expected to have read %d bytes for\n\t%+v", g, w, tt)
}
}
}

317
src/testing/match.go Normal file
View File

@@ -0,0 +1,317 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"fmt"
"os"
"strconv"
"strings"
"sync"
)
// matcher sanitizes, uniques, and filters names of subtests and subbenchmarks.
type matcher struct {
filter filterMatch
skip filterMatch
matchFunc func(pat, str string) (bool, error)
mu sync.Mutex
// subNames is used to deduplicate subtest names.
// Each key is the subtest name joined to the deduplicated name of the parent test.
// Each value is the count of the number of occurrences of the given subtest name
// already seen.
subNames map[string]int32
}
type filterMatch interface {
// matches checks the name against the receiver's pattern strings using the
// given match function.
matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool)
// verify checks that the receiver's pattern strings are valid filters by
// calling the given match function.
verify(name string, matchString func(pat, str string) (bool, error)) error
}
// simpleMatch matches a test name if all of the pattern strings match in
// sequence.
type simpleMatch []string
// alternationMatch matches a test name if one of the alternations match.
type alternationMatch []filterMatch
// TODO: fix test_main to avoid race and improve caching, also allowing to
// eliminate this Mutex.
var matchMutex sync.Mutex
func allMatcher() *matcher {
return newMatcher(nil, "", "", "")
}
func newMatcher(matchString func(pat, str string) (bool, error), patterns, name, skips string) *matcher {
var filter, skip filterMatch
if patterns == "" {
filter = simpleMatch{} // always partial true
} else {
filter = splitRegexp(patterns)
if err := filter.verify(name, matchString); err != nil {
fmt.Fprintf(os.Stderr, "testing: invalid regexp for %s\n", err)
os.Exit(1)
}
}
if skips == "" {
skip = alternationMatch{} // always false
} else {
skip = splitRegexp(skips)
if err := skip.verify("-test.skip", matchString); err != nil {
fmt.Fprintf(os.Stderr, "testing: invalid regexp for %v\n", err)
os.Exit(1)
}
}
return &matcher{
filter: filter,
skip: skip,
matchFunc: matchString,
subNames: map[string]int32{},
}
}
func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) {
name = subname
m.mu.Lock()
defer m.mu.Unlock()
if c != nil && c.level > 0 {
name = m.unique(c.name, rewrite(subname))
}
matchMutex.Lock()
defer matchMutex.Unlock()
// We check the full array of paths each time to allow for the case that a pattern contains a '/'.
elem := strings.Split(name, "/")
// filter must match.
// accept partial match that may produce full match later.
ok, partial = m.filter.matches(elem, m.matchFunc)
if !ok {
return name, false, false
}
// skip must not match.
// ignore partial match so we can get to more precise match later.
skip, partialSkip := m.skip.matches(elem, m.matchFunc)
if skip && !partialSkip {
return name, false, false
}
return name, ok, partial
}
// clearSubNames clears the matcher's internal state, potentially freeing
// memory. After this is called, T.Name may return the same strings as it did
// for earlier subtests.
func (m *matcher) clearSubNames() {
m.mu.Lock()
defer m.mu.Unlock()
clear(m.subNames)
}
func (m simpleMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) {
for i, s := range name {
if i >= len(m) {
break
}
if ok, _ := matchString(m[i], s); !ok {
return false, false
}
}
return true, len(name) < len(m)
}
func (m simpleMatch) verify(name string, matchString func(pat, str string) (bool, error)) error {
for i, s := range m {
m[i] = rewrite(s)
}
// Verify filters before doing any processing.
for i, s := range m {
if _, err := matchString(s, "non-empty"); err != nil {
return fmt.Errorf("element %d of %s (%q): %s", i, name, s, err)
}
}
return nil
}
func (m alternationMatch) matches(name []string, matchString func(pat, str string) (bool, error)) (ok, partial bool) {
for _, m := range m {
if ok, partial = m.matches(name, matchString); ok {
return ok, partial
}
}
return false, false
}
func (m alternationMatch) verify(name string, matchString func(pat, str string) (bool, error)) error {
for i, m := range m {
if err := m.verify(name, matchString); err != nil {
return fmt.Errorf("alternation %d of %s", i, err)
}
}
return nil
}
func splitRegexp(s string) filterMatch {
a := make(simpleMatch, 0, strings.Count(s, "/"))
b := make(alternationMatch, 0, strings.Count(s, "|"))
cs := 0
cp := 0
for i := 0; i < len(s); {
switch s[i] {
case '[':
cs++
case ']':
if cs--; cs < 0 { // An unmatched ']' is legal.
cs = 0
}
case '(':
if cs == 0 {
cp++
}
case ')':
if cs == 0 {
cp--
}
case '\\':
i++
case '/':
if cs == 0 && cp == 0 {
a = append(a, s[:i])
s = s[i+1:]
i = 0
continue
}
case '|':
if cs == 0 && cp == 0 {
a = append(a, s[:i])
s = s[i+1:]
i = 0
b = append(b, a)
a = make(simpleMatch, 0, len(a))
continue
}
}
i++
}
a = append(a, s)
if len(b) == 0 {
return a
}
return append(b, a)
}
// unique creates a unique name for the given parent and subname by affixing it
// with one or more counts, if necessary.
func (m *matcher) unique(parent, subname string) string {
base := parent + "/" + subname
for {
n := m.subNames[base]
if n < 0 {
panic("subtest count overflow")
}
m.subNames[base] = n + 1
if n == 0 && subname != "" {
prefix, nn := parseSubtestNumber(base)
if len(prefix) < len(base) && nn < m.subNames[prefix] {
// This test is explicitly named like "parent/subname#NN",
// and #NN was already used for the NNth occurrence of "parent/subname".
// Loop to add a disambiguating suffix.
continue
}
return base
}
name := fmt.Sprintf("%s#%02d", base, n)
if m.subNames[name] != 0 {
// This is the nth occurrence of base, but the name "parent/subname#NN"
// collides with the first occurrence of a subtest *explicitly* named
// "parent/subname#NN". Try the next number.
continue
}
return name
}
}
// parseSubtestNumber splits a subtest name into a "#%02d"-formatted int32
// suffix (if present), and a prefix preceding that suffix (always).
func parseSubtestNumber(s string) (prefix string, nn int32) {
i := strings.LastIndex(s, "#")
if i < 0 {
return s, 0
}
prefix, suffix := s[:i], s[i+1:]
if len(suffix) < 2 || (len(suffix) > 2 && suffix[0] == '0') {
// Even if suffix is numeric, it is not a possible output of a "%02" format
// string: it has either too few digits or too many leading zeroes.
return s, 0
}
if suffix == "00" {
if !strings.HasSuffix(prefix, "/") {
// We only use "#00" as a suffix for subtests named with the empty
// string — it isn't a valid suffix if the subtest name is non-empty.
return s, 0
}
}
n, err := strconv.ParseInt(suffix, 10, 32)
if err != nil || n < 0 {
return s, 0
}
return prefix, int32(n)
}
// rewrite rewrites a subname to contain only printable characters and no
// white space.
func rewrite(s string) string {
b := []byte{}
for _, r := range s {
switch {
case isSpace(r):
b = append(b, '_')
case !strconv.IsPrint(r):
s := strconv.QuoteRune(r)
b = append(b, s[1:len(s)-1]...)
default:
b = append(b, string(r)...)
}
}
return string(b)
}
func isSpace(r rune) bool {
if r < 0x2000 {
switch r {
// Note: not the same as Unicode Z class.
case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0, 0x1680:
return true
}
} else {
if r <= 0x200a {
return true
}
switch r {
case 0x2028, 0x2029, 0x202f, 0x205f, 0x3000:
return true
}
}
return false
}
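To connect the splitting above to everyday usage, a brief sketch of how -run and -skip patterns select subtests; the test and subtest names below are invented for illustration.

package demo_test

import "testing"

func TestEncode(t *testing.T) {
	for _, name := range []string{"json", "xml", "gob"} {
		t.Run(name, func(t *testing.T) { /* ... */ })
	}
}

// The pattern is split on unbracketed '/' and '|' and matched element by
// element against the '/'-separated subtest name:
//
//	go test -run 'TestEncode/json'                # only TestEncode/json
//	go test -run 'TestEncode/json|TestEncode/gob' # either full path
//	go test -run '/xml'                           # empty element matches any parent test
//	go test -skip 'TestEncode/gob'                # run everything except .../gob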

263
src/testing/match_test.go Normal file
View File

@@ -0,0 +1,263 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"fmt"
"reflect"
"regexp"
"strings"
"unicode"
)
func init() {
testingTesting = true
}
// Verify that our IsSpace agrees with unicode.IsSpace.
func TestIsSpace(t *T) {
n := 0
for r := rune(0); r <= unicode.MaxRune; r++ {
if isSpace(r) != unicode.IsSpace(r) {
t.Errorf("IsSpace(%U)=%t incorrect", r, isSpace(r))
n++
if n > 10 {
return
}
}
}
}
func TestSplitRegexp(t *T) {
res := func(s ...string) filterMatch { return simpleMatch(s) }
alt := func(m ...filterMatch) filterMatch { return alternationMatch(m) }
testCases := []struct {
pattern string
result filterMatch
}{
// Correct patterns
// If a regexp pattern is correct, all split regexps need to be correct
// as well.
{"", res("")},
{"/", res("", "")},
{"//", res("", "", "")},
{"A", res("A")},
{"A/B", res("A", "B")},
{"A/B/", res("A", "B", "")},
{"/A/B/", res("", "A", "B", "")},
{"[A]/(B)", res("[A]", "(B)")},
{"[/]/[/]", res("[/]", "[/]")},
{"[/]/[:/]", res("[/]", "[:/]")},
{"/]", res("", "]")},
{"]/", res("]", "")},
{"]/[/]", res("]", "[/]")},
{`([)/][(])`, res(`([)/][(])`)},
{"[(]/[)]", res("[(]", "[)]")},
{"A/B|C/D", alt(res("A", "B"), res("C", "D"))},
// Faulty patterns
// Errors in original should produce at least one faulty regexp in results.
{")/", res(")/")},
{")/(/)", res(")/(", ")")},
{"a[/)b", res("a[/)b")},
{"(/]", res("(/]")},
{"(/", res("(/")},
{"[/]/[/", res("[/]", "[/")},
{`\p{/}`, res(`\p{`, "}")},
{`\p/`, res(`\p`, "")},
{`[[:/:]]`, res(`[[:/:]]`)},
}
for _, tc := range testCases {
a := splitRegexp(tc.pattern)
if !reflect.DeepEqual(a, tc.result) {
t.Errorf("splitRegexp(%q) = %#v; want %#v", tc.pattern, a, tc.result)
}
// If there is any error in the pattern, one of the returned subpatterns
// needs to have an error as well.
if _, err := regexp.Compile(tc.pattern); err != nil {
ok := true
if err := a.verify("", regexp.MatchString); err != nil {
ok = false
}
if ok {
t.Errorf("%s: expected error in any of %q", tc.pattern, a)
}
}
}
}
func TestMatcher(t *T) {
testCases := []struct {
pattern string
skip string
parent, sub string
ok bool
partial bool
}{
// Behavior without subtests.
{"", "", "", "TestFoo", true, false},
{"TestFoo", "", "", "TestFoo", true, false},
{"TestFoo/", "", "", "TestFoo", true, true},
{"TestFoo/bar/baz", "", "", "TestFoo", true, true},
{"TestFoo", "", "", "TestBar", false, false},
{"TestFoo/", "", "", "TestBar", false, false},
{"TestFoo/bar/baz", "", "", "TestBar/bar/baz", false, false},
{"", "TestBar", "", "TestFoo", true, false},
{"", "TestBar", "", "TestBar", false, false},
// Skipping a non-existent test doesn't change anything.
{"", "TestFoo/skipped", "", "TestFoo", true, false},
{"TestFoo", "TestFoo/skipped", "", "TestFoo", true, false},
{"TestFoo/", "TestFoo/skipped", "", "TestFoo", true, true},
{"TestFoo/bar/baz", "TestFoo/skipped", "", "TestFoo", true, true},
{"TestFoo", "TestFoo/skipped", "", "TestBar", false, false},
{"TestFoo/", "TestFoo/skipped", "", "TestBar", false, false},
{"TestFoo/bar/baz", "TestFoo/skipped", "", "TestBar/bar/baz", false, false},
// with subtests
{"", "", "TestFoo", "x", true, false},
{"TestFoo", "", "TestFoo", "x", true, false},
{"TestFoo/", "", "TestFoo", "x", true, false},
{"TestFoo/bar/baz", "", "TestFoo", "bar", true, true},
{"", "TestFoo/skipped", "TestFoo", "x", true, false},
{"TestFoo", "TestFoo/skipped", "TestFoo", "x", true, false},
{"TestFoo", "TestFoo/skipped", "TestFoo", "skipped", false, false},
{"TestFoo/", "TestFoo/skipped", "TestFoo", "x", true, false},
{"TestFoo/bar/baz", "TestFoo/skipped", "TestFoo", "bar", true, true},
// Subtest with a '/' in its name still allows for copy and pasted names
// to match.
{"TestFoo/bar/baz", "", "TestFoo", "bar/baz", true, false},
{"TestFoo/bar/baz", "TestFoo/bar/baz", "TestFoo", "bar/baz", false, false},
{"TestFoo/bar/baz", "TestFoo/bar/baz/skip", "TestFoo", "bar/baz", true, false},
{"TestFoo/bar/baz", "", "TestFoo/bar", "baz", true, false},
{"TestFoo/bar/baz", "", "TestFoo", "x", false, false},
{"TestFoo", "", "TestBar", "x", false, false},
{"TestFoo/", "", "TestBar", "x", false, false},
{"TestFoo/bar/baz", "", "TestBar", "x/bar/baz", false, false},
{"A/B|C/D", "", "TestA", "B", true, false},
{"A/B|C/D", "", "TestC", "D", true, false},
{"A/B|C/D", "", "TestA", "C", false, false},
// subtests only
{"", "", "TestFoo", "x", true, false},
{"/", "", "TestFoo", "x", true, false},
{"./", "", "TestFoo", "x", true, false},
{"./.", "", "TestFoo", "x", true, false},
{"/bar/baz", "", "TestFoo", "bar", true, true},
{"/bar/baz", "", "TestFoo", "bar/baz", true, false},
{"//baz", "", "TestFoo", "bar/baz", true, false},
{"//", "", "TestFoo", "bar/baz", true, false},
{"/bar/baz", "", "TestFoo/bar", "baz", true, false},
{"//foo", "", "TestFoo", "bar/baz", false, false},
{"/bar/baz", "", "TestFoo", "x", false, false},
{"/bar/baz", "", "TestBar", "x/bar/baz", false, false},
}
for _, tc := range testCases {
m := newMatcher(regexp.MatchString, tc.pattern, "-test.run", tc.skip)
parent := &common{name: tc.parent}
if tc.parent != "" {
parent.level = 1
}
if n, ok, partial := m.fullName(parent, tc.sub); ok != tc.ok || partial != tc.partial {
t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v partial %v; want ok %v partial %v",
tc.pattern, tc.parent, tc.sub, n, ok, partial, tc.ok, tc.partial)
}
}
}
var namingTestCases = []struct{ name, want string }{
// Uniqueness
{"", "x/#00"},
{"", "x/#01"},
{"#0", "x/#0"}, // Doesn't conflict with #00 because the number of digits differs.
{"#00", "x/#00#01"}, // Conflicts with implicit #00 (used above), so add a suffix.
{"#", "x/#"},
{"#", "x/##01"},
{"t", "x/t"},
{"t", "x/t#01"},
{"t", "x/t#02"},
{"t#00", "x/t#00"}, // Explicit "#00" doesn't conflict with the unsuffixed first subtest.
{"a#01", "x/a#01"}, // user has subtest with this name.
{"a", "x/a"}, // doesn't conflict with this name.
{"a", "x/a#02"}, // This string is claimed now, so resume
{"a", "x/a#03"}, // with counting.
{"a#02", "x/a#02#01"}, // We already used a#02 once, so add a suffix.
{"b#00", "x/b#00"},
{"b", "x/b"}, // Implicit 0 doesn't conflict with explicit "#00".
{"b", "x/b#01"},
{"b#9223372036854775807", "x/b#9223372036854775807"}, // MaxInt64
{"b", "x/b#02"},
{"b", "x/b#03"},
// Sanitizing
{"A:1 B:2", "x/A:1_B:2"},
{"s\t\r\u00a0", "x/s___"},
{"\x01", `x/\x01`},
{"\U0010ffff", `x/\U0010ffff`},
}
func TestNaming(t *T) {
m := newMatcher(regexp.MatchString, "", "", "")
parent := &common{name: "x", level: 1} // top-level test.
for i, tc := range namingTestCases {
if got, _, _ := m.fullName(parent, tc.name); got != tc.want {
t.Errorf("%d:%s: got %q; want %q", i, tc.name, got, tc.want)
}
}
}
func FuzzNaming(f *F) {
for _, tc := range namingTestCases {
f.Add(tc.name)
}
parent := &common{name: "x", level: 1}
var m *matcher
var seen map[string]string
reset := func() {
m = allMatcher()
seen = make(map[string]string)
}
reset()
f.Fuzz(func(t *T, subname string) {
if len(subname) > 10 {
// Long names attract the OOM killer.
t.Skip()
}
name := m.unique(parent.name, subname)
if !strings.Contains(name, "/"+subname) {
t.Errorf("name %q does not contain subname %q", name, subname)
}
if prev, ok := seen[name]; ok {
t.Errorf("name %q generated by both %q and %q", name, prev, subname)
}
if len(seen) > 1e6 {
// Free up memory.
reset()
}
seen[name] = subname
})
}
// GoString returns a string that is more readable than the default, which makes
// it easier to read test errors.
func (m alternationMatch) GoString() string {
s := make([]string, len(m))
for i, m := range m {
s[i] = fmt.Sprintf("%#v", m)
}
return fmt.Sprintf("(%s)", strings.Join(s, " | "))
}

56
src/testing/newcover.go Normal file
View File

@@ -0,0 +1,56 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Support for test coverage with redesigned coverage implementation.
package testing
import (
"fmt"
"internal/goexperiment"
"os"
_ "unsafe" // for linkname
)
// cover2 variable stores the current coverage mode and a
// tear-down function to be called at the end of the testing run.
var cover2 struct {
mode string
tearDown func(coverprofile string, gocoverdir string) (string, error)
snapshotcov func() float64
}
// registerCover2 is invoked during "go test -cover" runs.
// It is used to record a 'tear down' function
// (to be called when the test is complete) and the coverage mode.
func registerCover2(mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) {
if mode == "" {
return
}
cover2.mode = mode
cover2.tearDown = tearDown
cover2.snapshotcov = snapcov
}
// coverReport2 invokes a callback in _testmain.go that will
// emit coverage data at the point where test execution is complete,
// for "go test -cover" runs.
func coverReport2() {
if !goexperiment.CoverageRedesign {
panic("unexpected")
}
if errmsg, err := cover2.tearDown(*coverProfile, *gocoverdir); err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", errmsg, err)
os.Exit(2)
}
}
// coverage2 returns a rough "coverage percentage so far"
// number to support the testing.Coverage() function.
func coverage2() float64 {
if cover2.mode == "" {
return 0.0
}
return cover2.snapshotcov()
}
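coverage2 above is what ultimately backs testing.Coverage when the redesigned coverage implementation is enabled; a small sketch of reading that number from a test, meaningful only when the binary is built with go test -cover.

package demo_test

import "testing"

func TestCoverageSnapshot(t *testing.T) {
	// Coverage returns a fraction in [0, 1]; it is 0 unless the binary
	// was built with coverage instrumentation.
	t.Logf("coverage so far: %.1f%%", testing.Coverage()*100)
}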

267
src/testing/panic_test.go Normal file
View File

@@ -0,0 +1,267 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"flag"
"fmt"
"internal/testenv"
"os"
"os/exec"
"regexp"
"runtime"
"strings"
"testing"
)
var testPanicTest = flag.String("test_panic_test", "", "TestPanic: indicates which test should panic")
var testPanicParallel = flag.Bool("test_panic_parallel", false, "TestPanic: run subtests in parallel")
var testPanicCleanup = flag.Bool("test_panic_cleanup", false, "TestPanic: indicates whether test should call Cleanup")
var testPanicCleanupPanic = flag.String("test_panic_cleanup_panic", "", "TestPanic: indicate whether test should call Cleanup function that panics")
func TestPanic(t *testing.T) {
testenv.MustHaveExec(t)
testCases := []struct {
desc string
flags []string
want string
}{{
desc: "root test panics",
flags: []string{"-test_panic_test=TestPanicHelper"},
want: `
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
`,
}, {
desc: "subtest panics",
flags: []string{"-test_panic_test=TestPanicHelper/1"},
want: `
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "subtest panics with cleanup",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "subtest panics with outer cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
`,
}, {
desc: "subtest panics with middle cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "subtest panics with inner cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "parallel subtest panics with cleanup",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_parallel"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "parallel subtest panics with outer cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=outer", "-test_panic_parallel"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
`,
}, {
desc: "parallel subtest panics with middle cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=middle", "-test_panic_parallel"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}, {
desc: "parallel subtest panics with inner cleanup panic",
flags: []string{"-test_panic_test=TestPanicHelper/1", "-test_panic_cleanup", "-test_panic_cleanup_panic=inner", "-test_panic_parallel"},
want: `
ran inner cleanup 1
ran middle cleanup 1
ran outer cleanup
--- FAIL: TestPanicHelper (N.NNs)
panic_test.go:NNN: TestPanicHelper
--- FAIL: TestPanicHelper/1 (N.NNs)
panic_test.go:NNN: TestPanicHelper/1
`,
}}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
cmd := exec.Command(os.Args[0], "-test.run=^TestPanicHelper$")
cmd.Args = append(cmd.Args, tc.flags...)
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
b, _ := cmd.CombinedOutput()
got := string(b)
want := strings.TrimSpace(tc.want)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want)
}
})
}
}
func makeRegexp(s string) string {
s = regexp.QuoteMeta(s)
s = strings.ReplaceAll(s, ":NNN:", `:\d+:`)
s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`)
return s
}
func TestPanicHelper(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
t.Log(t.Name())
if t.Name() == *testPanicTest {
panic("panic")
}
switch *testPanicCleanupPanic {
case "", "outer", "middle", "inner":
default:
t.Fatalf("bad -test_panic_cleanup_panic: %s", *testPanicCleanupPanic)
}
t.Cleanup(func() {
fmt.Println("ran outer cleanup")
if *testPanicCleanupPanic == "outer" {
panic("outer cleanup")
}
})
for i := 0; i < 3; i++ {
i := i
t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
chosen := t.Name() == *testPanicTest
if chosen && *testPanicCleanup {
t.Cleanup(func() {
fmt.Printf("ran middle cleanup %d\n", i)
if *testPanicCleanupPanic == "middle" {
panic("middle cleanup")
}
})
}
if chosen && *testPanicParallel {
t.Parallel()
}
t.Log(t.Name())
if chosen {
if *testPanicCleanup {
t.Cleanup(func() {
fmt.Printf("ran inner cleanup %d\n", i)
if *testPanicCleanupPanic == "inner" {
panic("inner cleanup")
}
})
}
panic("panic")
}
})
}
}
func TestMorePanic(t *testing.T) {
testenv.MustHaveExec(t)
testCases := []struct {
desc string
flags []string
want string
}{
{
desc: "Issue 48502: call runtime.Goexit in t.Cleanup after panic",
flags: []string{"-test.run=^TestGoexitInCleanupAfterPanicHelper$"},
want: `panic: die
panic: test executed panic(nil) or runtime.Goexit`,
},
{
desc: "Issue 48515: call t.Run in t.Cleanup should trigger panic",
flags: []string{"-test.run=^TestCallRunInCleanupHelper$"},
want: `panic: testing: t.Run called during t.Cleanup`,
},
}
for _, tc := range testCases {
cmd := exec.Command(os.Args[0], tc.flags...)
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
b, _ := cmd.CombinedOutput()
got := string(b)
want := tc.want
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("output:\ngot:\n%s\nwant:\n%s", got, want)
}
}
}
func TestCallRunInCleanupHelper(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
t.Cleanup(func() {
t.Run("in-cleanup", func(t *testing.T) {
t.Log("must not be executed")
})
})
}
func TestGoexitInCleanupAfterPanicHelper(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
t.Cleanup(func() { runtime.Goexit() })
t.Parallel()
panic("die")
}

385
src/testing/quick/quick.go Normal file
View File

@@ -0,0 +1,385 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package quick implements utility functions to help with black box testing.
//
// The testing/quick package is frozen and is not accepting new features.
package quick
import (
"flag"
"fmt"
"math"
"math/rand"
"reflect"
"strings"
"time"
)
var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check")
// A Generator can generate random values of its own type.
type Generator interface {
// Generate returns a random instance of the type on which it is a
// method using the size as a size hint.
Generate(rand *rand.Rand, size int) reflect.Value
}
// randFloat32 generates a random float taking the full range of a float32.
func randFloat32(rand *rand.Rand) float32 {
f := rand.Float64() * math.MaxFloat32
if rand.Int()&1 == 1 {
f = -f
}
return float32(f)
}
// randFloat64 generates a random float taking the full range of a float64.
func randFloat64(rand *rand.Rand) float64 {
f := rand.Float64() * math.MaxFloat64
if rand.Int()&1 == 1 {
f = -f
}
return f
}
// randInt64 returns a random int64.
func randInt64(rand *rand.Rand) int64 {
return int64(rand.Uint64())
}
// complexSize is the maximum length of arbitrary values that contain other
// values.
const complexSize = 50
// Value returns an arbitrary value of the given type.
// If the type implements the [Generator] interface, that will be used.
// Note: To create arbitrary values for structs, all the fields must be exported.
func Value(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool) {
return sizedValue(t, rand, complexSize)
}
// sizedValue returns an arbitrary value of the given type. The size
// hint is used for shrinking as a function of indirection level so
// that recursive data structures will terminate.
func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value, ok bool) {
if m, ok := reflect.Zero(t).Interface().(Generator); ok {
return m.Generate(rand, size), true
}
v := reflect.New(t).Elem()
switch concrete := t; concrete.Kind() {
case reflect.Bool:
v.SetBool(rand.Int()&1 == 0)
case reflect.Float32:
v.SetFloat(float64(randFloat32(rand)))
case reflect.Float64:
v.SetFloat(randFloat64(rand))
case reflect.Complex64:
v.SetComplex(complex(float64(randFloat32(rand)), float64(randFloat32(rand))))
case reflect.Complex128:
v.SetComplex(complex(randFloat64(rand), randFloat64(rand)))
case reflect.Int16:
v.SetInt(randInt64(rand))
case reflect.Int32:
v.SetInt(randInt64(rand))
case reflect.Int64:
v.SetInt(randInt64(rand))
case reflect.Int8:
v.SetInt(randInt64(rand))
case reflect.Int:
v.SetInt(randInt64(rand))
case reflect.Uint16:
v.SetUint(uint64(randInt64(rand)))
case reflect.Uint32:
v.SetUint(uint64(randInt64(rand)))
case reflect.Uint64:
v.SetUint(uint64(randInt64(rand)))
case reflect.Uint8:
v.SetUint(uint64(randInt64(rand)))
case reflect.Uint:
v.SetUint(uint64(randInt64(rand)))
case reflect.Uintptr:
v.SetUint(uint64(randInt64(rand)))
case reflect.Map:
numElems := rand.Intn(size)
v.Set(reflect.MakeMap(concrete))
for i := 0; i < numElems; i++ {
key, ok1 := sizedValue(concrete.Key(), rand, size)
value, ok2 := sizedValue(concrete.Elem(), rand, size)
if !ok1 || !ok2 {
return reflect.Value{}, false
}
v.SetMapIndex(key, value)
}
case reflect.Pointer:
if rand.Intn(size) == 0 {
v.SetZero() // Generate nil pointer.
} else {
elem, ok := sizedValue(concrete.Elem(), rand, size)
if !ok {
return reflect.Value{}, false
}
v.Set(reflect.New(concrete.Elem()))
v.Elem().Set(elem)
}
case reflect.Slice:
numElems := rand.Intn(size)
sizeLeft := size - numElems
v.Set(reflect.MakeSlice(concrete, numElems, numElems))
for i := 0; i < numElems; i++ {
elem, ok := sizedValue(concrete.Elem(), rand, sizeLeft)
if !ok {
return reflect.Value{}, false
}
v.Index(i).Set(elem)
}
case reflect.Array:
for i := 0; i < v.Len(); i++ {
elem, ok := sizedValue(concrete.Elem(), rand, size)
if !ok {
return reflect.Value{}, false
}
v.Index(i).Set(elem)
}
case reflect.String:
numChars := rand.Intn(complexSize)
codePoints := make([]rune, numChars)
for i := 0; i < numChars; i++ {
codePoints[i] = rune(rand.Intn(0x10ffff))
}
v.SetString(string(codePoints))
case reflect.Struct:
n := v.NumField()
// Divide sizeLeft evenly among the struct fields.
sizeLeft := size
if n > sizeLeft {
sizeLeft = 1
} else if n > 0 {
sizeLeft /= n
}
for i := 0; i < n; i++ {
elem, ok := sizedValue(concrete.Field(i).Type, rand, sizeLeft)
if !ok {
return reflect.Value{}, false
}
v.Field(i).Set(elem)
}
default:
return reflect.Value{}, false
}
return v, true
}
// A Config structure contains options for running a test.
type Config struct {
// MaxCount sets the maximum number of iterations.
// If zero, MaxCountScale is used.
MaxCount int
// MaxCountScale is a non-negative scale factor applied to the
// default maximum.
// A count of zero implies the default, which is usually 100
// but can be set by the -quickchecks flag.
MaxCountScale float64
// Rand specifies a source of random numbers.
// If nil, a default pseudo-random source will be used.
Rand *rand.Rand
// Values specifies a function to generate a slice of
// arbitrary reflect.Values that are congruent with the
// arguments to the function being tested.
// If nil, the top-level Value function is used to generate them.
Values func([]reflect.Value, *rand.Rand)
}
var defaultConfig Config
// getRand returns the *rand.Rand to use for a given Config.
func (c *Config) getRand() *rand.Rand {
if c.Rand == nil {
return rand.New(rand.NewSource(time.Now().UnixNano()))
}
return c.Rand
}
// getMaxCount returns the maximum number of iterations to run for a given
// Config.
func (c *Config) getMaxCount() (maxCount int) {
maxCount = c.MaxCount
if maxCount == 0 {
if c.MaxCountScale != 0 {
maxCount = int(c.MaxCountScale * float64(*defaultMaxCount))
} else {
maxCount = *defaultMaxCount
}
}
return
}
// A SetupError is the result of an error in the way that check is being
// used, independent of the functions being tested.
type SetupError string
func (s SetupError) Error() string { return string(s) }
// A CheckError is the result of Check finding an error.
type CheckError struct {
Count int
In []any
}
func (s *CheckError) Error() string {
return fmt.Sprintf("#%d: failed on input %s", s.Count, toString(s.In))
}
// A CheckEqualError is the result of [CheckEqual] finding an error.
type CheckEqualError struct {
CheckError
Out1 []any
Out2 []any
}
func (s *CheckEqualError) Error() string {
return fmt.Sprintf("#%d: failed on input %s. Output 1: %s. Output 2: %s", s.Count, toString(s.In), toString(s.Out1), toString(s.Out2))
}
// Check looks for an input to f, any function that returns bool,
// such that f returns false. It calls f repeatedly, with arbitrary
// values for each argument. If f returns false on a given input,
// Check returns that input as a *[CheckError].
// For example:
//
// func TestOddMultipleOfThree(t *testing.T) {
// f := func(x int) bool {
// y := OddMultipleOfThree(x)
// return y%2 == 1 && y%3 == 0
// }
// if err := quick.Check(f, nil); err != nil {
// t.Error(err)
// }
// }
func Check(f any, config *Config) error {
if config == nil {
config = &defaultConfig
}
fVal, fType, ok := functionAndType(f)
if !ok {
return SetupError("argument is not a function")
}
if fType.NumOut() != 1 {
return SetupError("function does not return one value")
}
if fType.Out(0).Kind() != reflect.Bool {
return SetupError("function does not return a bool")
}
arguments := make([]reflect.Value, fType.NumIn())
rand := config.getRand()
maxCount := config.getMaxCount()
for i := 0; i < maxCount; i++ {
err := arbitraryValues(arguments, fType, config, rand)
if err != nil {
return err
}
if !fVal.Call(arguments)[0].Bool() {
return &CheckError{i + 1, toInterfaces(arguments)}
}
}
return nil
}
// CheckEqual looks for an input on which f and g return different results.
// It calls f and g repeatedly with arbitrary values for each argument.
// If f and g return different answers, CheckEqual returns a *[CheckEqualError]
// describing the input and the outputs.
func CheckEqual(f, g any, config *Config) error {
if config == nil {
config = &defaultConfig
}
x, xType, ok := functionAndType(f)
if !ok {
return SetupError("f is not a function")
}
y, yType, ok := functionAndType(g)
if !ok {
return SetupError("g is not a function")
}
if xType != yType {
return SetupError("functions have different types")
}
arguments := make([]reflect.Value, xType.NumIn())
rand := config.getRand()
maxCount := config.getMaxCount()
for i := 0; i < maxCount; i++ {
err := arbitraryValues(arguments, xType, config, rand)
if err != nil {
return err
}
xOut := toInterfaces(x.Call(arguments))
yOut := toInterfaces(y.Call(arguments))
if !reflect.DeepEqual(xOut, yOut) {
return &CheckEqualError{CheckError{i + 1, toInterfaces(arguments)}, xOut, yOut}
}
}
return nil
}
// arbitraryValues writes Values to args such that args contains Values
// suitable for calling f.
func arbitraryValues(args []reflect.Value, f reflect.Type, config *Config, rand *rand.Rand) (err error) {
if config.Values != nil {
config.Values(args, rand)
return
}
for j := 0; j < len(args); j++ {
var ok bool
args[j], ok = Value(f.In(j), rand)
if !ok {
err = SetupError(fmt.Sprintf("cannot create arbitrary value of type %s for argument %d", f.In(j), j))
return
}
}
return
}
func functionAndType(f any) (v reflect.Value, t reflect.Type, ok bool) {
v = reflect.ValueOf(f)
ok = v.Kind() == reflect.Func
if !ok {
return
}
t = v.Type()
return
}
func toInterfaces(values []reflect.Value) []any {
ret := make([]any, len(values))
for i, v := range values {
ret[i] = v.Interface()
}
return ret
}
func toString(interfaces []any) string {
s := make([]string, len(interfaces))
for i, v := range interfaces {
s[i] = fmt.Sprintf("%#v", v)
}
return strings.Join(s, ", ")
}
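A short sketch of CheckEqual in practice, comparing an invented naive implementation against strings.Contains; CheckEqual feeds both functions the same arbitrary inputs and reports the first disagreement.

package demo_test

import (
	"strings"
	"testing"
	"testing/quick"
)

// containsNaive is a deliberately simple reference implementation.
func containsNaive(s, sub string) bool {
	for i := 0; i+len(sub) <= len(s); i++ {
		if s[i:i+len(sub)] == sub {
			return true
		}
	}
	return false
}

func TestContainsAgrees(t *testing.T) {
	if err := quick.CheckEqual(containsNaive, strings.Contains, nil); err != nil {
		t.Error(err)
	}
}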

View File

@@ -0,0 +1,327 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package quick
import (
"math/rand"
"reflect"
"testing"
)
func fArray(a [4]byte) [4]byte { return a }
type TestArrayAlias [4]byte
func fArrayAlias(a TestArrayAlias) TestArrayAlias { return a }
func fBool(a bool) bool { return a }
type TestBoolAlias bool
func fBoolAlias(a TestBoolAlias) TestBoolAlias { return a }
func fFloat32(a float32) float32 { return a }
type TestFloat32Alias float32
func fFloat32Alias(a TestFloat32Alias) TestFloat32Alias { return a }
func fFloat64(a float64) float64 { return a }
type TestFloat64Alias float64
func fFloat64Alias(a TestFloat64Alias) TestFloat64Alias { return a }
func fComplex64(a complex64) complex64 { return a }
type TestComplex64Alias complex64
func fComplex64Alias(a TestComplex64Alias) TestComplex64Alias { return a }
func fComplex128(a complex128) complex128 { return a }
type TestComplex128Alias complex128
func fComplex128Alias(a TestComplex128Alias) TestComplex128Alias { return a }
func fInt16(a int16) int16 { return a }
type TestInt16Alias int16
func fInt16Alias(a TestInt16Alias) TestInt16Alias { return a }
func fInt32(a int32) int32 { return a }
type TestInt32Alias int32
func fInt32Alias(a TestInt32Alias) TestInt32Alias { return a }
func fInt64(a int64) int64 { return a }
type TestInt64Alias int64
func fInt64Alias(a TestInt64Alias) TestInt64Alias { return a }
func fInt8(a int8) int8 { return a }
type TestInt8Alias int8
func fInt8Alias(a TestInt8Alias) TestInt8Alias { return a }
func fInt(a int) int { return a }
type TestIntAlias int
func fIntAlias(a TestIntAlias) TestIntAlias { return a }
func fMap(a map[int]int) map[int]int { return a }
type TestMapAlias map[int]int
func fMapAlias(a TestMapAlias) TestMapAlias { return a }
func fPtr(a *int) *int {
if a == nil {
return nil
}
b := *a
return &b
}
type TestPtrAlias *int
func fPtrAlias(a TestPtrAlias) TestPtrAlias { return a }
func fSlice(a []byte) []byte { return a }
type TestSliceAlias []byte
func fSliceAlias(a TestSliceAlias) TestSliceAlias { return a }
func fString(a string) string { return a }
type TestStringAlias string
func fStringAlias(a TestStringAlias) TestStringAlias { return a }
type TestStruct struct {
A int
B string
}
func fStruct(a TestStruct) TestStruct { return a }
type TestStructAlias TestStruct
func fStructAlias(a TestStructAlias) TestStructAlias { return a }
func fUint16(a uint16) uint16 { return a }
type TestUint16Alias uint16
func fUint16Alias(a TestUint16Alias) TestUint16Alias { return a }
func fUint32(a uint32) uint32 { return a }
type TestUint32Alias uint32
func fUint32Alias(a TestUint32Alias) TestUint32Alias { return a }
func fUint64(a uint64) uint64 { return a }
type TestUint64Alias uint64
func fUint64Alias(a TestUint64Alias) TestUint64Alias { return a }
func fUint8(a uint8) uint8 { return a }
type TestUint8Alias uint8
func fUint8Alias(a TestUint8Alias) TestUint8Alias { return a }
func fUint(a uint) uint { return a }
type TestUintAlias uint
func fUintAlias(a TestUintAlias) TestUintAlias { return a }
func fUintptr(a uintptr) uintptr { return a }
type TestUintptrAlias uintptr
func fUintptrAlias(a TestUintptrAlias) TestUintptrAlias { return a }
func reportError(property string, err error, t *testing.T) {
if err != nil {
t.Errorf("%s: %s", property, err)
}
}
func TestCheckEqual(t *testing.T) {
reportError("fArray", CheckEqual(fArray, fArray, nil), t)
reportError("fArrayAlias", CheckEqual(fArrayAlias, fArrayAlias, nil), t)
reportError("fBool", CheckEqual(fBool, fBool, nil), t)
reportError("fBoolAlias", CheckEqual(fBoolAlias, fBoolAlias, nil), t)
reportError("fFloat32", CheckEqual(fFloat32, fFloat32, nil), t)
reportError("fFloat32Alias", CheckEqual(fFloat32Alias, fFloat32Alias, nil), t)
reportError("fFloat64", CheckEqual(fFloat64, fFloat64, nil), t)
reportError("fFloat64Alias", CheckEqual(fFloat64Alias, fFloat64Alias, nil), t)
reportError("fComplex64", CheckEqual(fComplex64, fComplex64, nil), t)
reportError("fComplex64Alias", CheckEqual(fComplex64Alias, fComplex64Alias, nil), t)
reportError("fComplex128", CheckEqual(fComplex128, fComplex128, nil), t)
reportError("fComplex128Alias", CheckEqual(fComplex128Alias, fComplex128Alias, nil), t)
reportError("fInt16", CheckEqual(fInt16, fInt16, nil), t)
reportError("fInt16Alias", CheckEqual(fInt16Alias, fInt16Alias, nil), t)
reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t)
reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t)
reportError("fInt64", CheckEqual(fInt64, fInt64, nil), t)
reportError("fInt64Alias", CheckEqual(fInt64Alias, fInt64Alias, nil), t)
reportError("fInt8", CheckEqual(fInt8, fInt8, nil), t)
reportError("fInt8Alias", CheckEqual(fInt8Alias, fInt8Alias, nil), t)
reportError("fInt", CheckEqual(fInt, fInt, nil), t)
reportError("fIntAlias", CheckEqual(fIntAlias, fIntAlias, nil), t)
reportError("fInt32", CheckEqual(fInt32, fInt32, nil), t)
reportError("fInt32Alias", CheckEqual(fInt32Alias, fInt32Alias, nil), t)
reportError("fMap", CheckEqual(fMap, fMap, nil), t)
reportError("fMapAlias", CheckEqual(fMapAlias, fMapAlias, nil), t)
reportError("fPtr", CheckEqual(fPtr, fPtr, nil), t)
reportError("fPtrAlias", CheckEqual(fPtrAlias, fPtrAlias, nil), t)
reportError("fSlice", CheckEqual(fSlice, fSlice, nil), t)
reportError("fSliceAlias", CheckEqual(fSliceAlias, fSliceAlias, nil), t)
reportError("fString", CheckEqual(fString, fString, nil), t)
reportError("fStringAlias", CheckEqual(fStringAlias, fStringAlias, nil), t)
reportError("fStruct", CheckEqual(fStruct, fStruct, nil), t)
reportError("fStructAlias", CheckEqual(fStructAlias, fStructAlias, nil), t)
reportError("fUint16", CheckEqual(fUint16, fUint16, nil), t)
reportError("fUint16Alias", CheckEqual(fUint16Alias, fUint16Alias, nil), t)
reportError("fUint32", CheckEqual(fUint32, fUint32, nil), t)
reportError("fUint32Alias", CheckEqual(fUint32Alias, fUint32Alias, nil), t)
reportError("fUint64", CheckEqual(fUint64, fUint64, nil), t)
reportError("fUint64Alias", CheckEqual(fUint64Alias, fUint64Alias, nil), t)
reportError("fUint8", CheckEqual(fUint8, fUint8, nil), t)
reportError("fUint8Alias", CheckEqual(fUint8Alias, fUint8Alias, nil), t)
reportError("fUint", CheckEqual(fUint, fUint, nil), t)
reportError("fUintAlias", CheckEqual(fUintAlias, fUintAlias, nil), t)
reportError("fUintptr", CheckEqual(fUintptr, fUintptr, nil), t)
reportError("fUintptrAlias", CheckEqual(fUintptrAlias, fUintptrAlias, nil), t)
}
// This tests that Value is working by checking that all the arbitrary
// values of type myStruct have x = 42.
type myStruct struct {
x int
}
func (m myStruct) Generate(r *rand.Rand, _ int) reflect.Value {
return reflect.ValueOf(myStruct{x: 42})
}
func myStructProperty(in myStruct) bool { return in.x == 42 }
func TestCheckProperty(t *testing.T) {
reportError("myStructProperty", Check(myStructProperty, nil), t)
}
func TestFailure(t *testing.T) {
f := func(x int) bool { return false }
err := Check(f, nil)
if err == nil {
t.Errorf("Check didn't return an error")
}
if _, ok := err.(*CheckError); !ok {
t.Errorf("Error was not a CheckError: %s", err)
}
err = CheckEqual(fUint, fUint32, nil)
if err == nil {
t.Errorf("#1 CheckEqual didn't return an error")
}
if _, ok := err.(SetupError); !ok {
t.Errorf("#1 Error was not a SetupError: %s", err)
}
err = CheckEqual(func(x, y int) {}, func(x int) {}, nil)
if err == nil {
t.Errorf("#2 CheckEqual didn't return an error")
}
if _, ok := err.(SetupError); !ok {
t.Errorf("#2 Error was not a SetupError: %s", err)
}
err = CheckEqual(func(x int) int { return 0 }, func(x int) int32 { return 0 }, nil)
if err == nil {
t.Errorf("#3 CheckEqual didn't return an error")
}
if _, ok := err.(SetupError); !ok {
t.Errorf("#3 Error was not a SetupError: %s", err)
}
}
// Generating values for recursive data structures used to fail to terminate.
// Issues 8818 and 11148.
func TestRecursive(t *testing.T) {
type R struct {
Ptr *R
SliceP []*R
Slice []R
Map map[int]R
MapP map[int]*R
MapR map[*R]*R
SliceMap []map[int]R
}
f := func(r R) bool { return true }
Check(f, nil)
}
func TestEmptyStruct(t *testing.T) {
f := func(struct{}) bool { return true }
Check(f, nil)
}
type (
A struct{ B *B }
B struct{ A *A }
)
func TestMutuallyRecursive(t *testing.T) {
f := func(a A) bool { return true }
Check(f, nil)
}
// Some serialization formats (e.g. encoding/pem) cannot distinguish
// between a nil and an empty map or slice, so avoid generating the
// zero value for these.
func TestNonZeroSliceAndMap(t *testing.T) {
type Q struct {
M map[int]int
S []int
}
f := func(q Q) bool {
return q.M != nil && q.S != nil
}
err := Check(f, nil)
if err != nil {
t.Fatal(err)
}
}
func TestInt64(t *testing.T) {
var lo, hi int64
f := func(x int64) bool {
if x < lo {
lo = x
}
if x > hi {
hi = x
}
return true
}
cfg := &Config{MaxCount: 10000}
Check(f, cfg)
if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 {
t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi)
}
}

View File

@@ -0,0 +1,66 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !js && !wasip1
// TODO(@musiol, @odeke-em): re-unify this entire file back into
// example.go when js/wasm gets an os.Pipe implementation
// and no longer needs this separation.
package testing
import (
"fmt"
"io"
"os"
"strings"
"time"
)
func runExample(eg InternalExample) (ok bool) {
if chatty.on {
fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name)
}
// Capture stdout.
stdout := os.Stdout
r, w, err := os.Pipe()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Stdout = w
outC := make(chan string)
go func() {
var buf strings.Builder
_, err := io.Copy(&buf, r)
r.Close()
if err != nil {
fmt.Fprintf(os.Stderr, "testing: copying pipe: %v\n", err)
os.Exit(1)
}
outC <- buf.String()
}()
finished := false
start := time.Now()
// Clean up in a deferred call so we can recover if the example panics.
defer func() {
timeSpent := time.Since(start)
// Close pipe, restore stdout, get output.
w.Close()
os.Stdout = stdout
out := <-outC
err := recover()
ok = eg.processRunResult(out, timeSpent, finished, err)
}()
// Run example.
eg.F()
finished = true
return
}
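For reference, the convention this machinery implements: an Example function's stdout is captured as above and compared against its trailing Output comment. The function below is invented for illustration.

package demo_test

import "fmt"

func ExampleGreet() {
	fmt.Println("hello, gopher")
	// Output: hello, gopher
}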

View File

@@ -0,0 +1,76 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js || wasip1
package testing
import (
"fmt"
"io"
"os"
"strings"
"time"
)
// TODO(@musiol, @odeke-em): unify this code back into
// example.go when js/wasm gets an os.Pipe implementation.
func runExample(eg InternalExample) (ok bool) {
if chatty.on {
fmt.Printf("%s=== RUN %s\n", chatty.prefix(), eg.Name)
}
// Capture stdout to temporary file. We're not using
// os.Pipe because it is not supported on js/wasm.
stdout := os.Stdout
f := createTempFile(eg.Name)
os.Stdout = f
finished := false
start := time.Now()
// Clean up in a deferred call so we can recover if the example panics.
defer func() {
timeSpent := time.Since(start)
// Restore stdout, get output and remove temporary file.
os.Stdout = stdout
var buf strings.Builder
_, seekErr := f.Seek(0, io.SeekStart)
_, readErr := io.Copy(&buf, f)
out := buf.String()
f.Close()
os.Remove(f.Name())
if seekErr != nil {
fmt.Fprintf(os.Stderr, "testing: seek temp file: %v\n", seekErr)
os.Exit(1)
}
if readErr != nil {
fmt.Fprintf(os.Stderr, "testing: read temp file: %v\n", readErr)
os.Exit(1)
}
err := recover()
ok = eg.processRunResult(out, timeSpent, finished, err)
}()
// Run example.
eg.F()
finished = true
return
}
func createTempFile(exampleName string) *os.File {
for i := 0; ; i++ {
name := fmt.Sprintf("%s/go-example-stdout-%s-%d.txt", os.TempDir(), exampleName, i)
f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
if os.IsExist(err) {
continue
}
fmt.Fprintf(os.Stderr, "testing: open temp file: %v\n", err)
os.Exit(1)
}
return f
}
}

View File

@@ -0,0 +1,44 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slogtest_test
import (
"bytes"
"encoding/json"
"log"
"log/slog"
"testing/slogtest"
)
// This example demonstrates one technique for testing a handler with this
// package. The handler is given a [bytes.Buffer] to write to, and each line
// of the resulting output is parsed.
// For JSON output, [encoding/json.Unmarshal] produces a result in the desired
// format when given a pointer to a map[string]any.
func Example_parsing() {
var buf bytes.Buffer
h := slog.NewJSONHandler(&buf, nil)
results := func() []map[string]any {
var ms []map[string]any
for _, line := range bytes.Split(buf.Bytes(), []byte{'\n'}) {
if len(line) == 0 {
continue
}
var m map[string]any
if err := json.Unmarshal(line, &m); err != nil {
panic(err) // In a real test, use t.Fatal.
}
ms = append(ms, m)
}
return ms
}
err := slogtest.TestHandler(h, results)
if err != nil {
log.Fatal(err)
}
// Output:
}

View File

@@ -0,0 +1,31 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slogtest_test
import (
"bytes"
"encoding/json"
"log/slog"
"testing"
"testing/slogtest"
)
func TestRun(t *testing.T) {
var buf bytes.Buffer
newHandler := func(*testing.T) slog.Handler {
buf.Reset()
return slog.NewJSONHandler(&buf, nil)
}
result := func(t *testing.T) map[string]any {
m := map[string]any{}
if err := json.Unmarshal(buf.Bytes(), &m); err != nil {
t.Fatal(err)
}
return m
}
slogtest.Run(t, newHandler, result)
}

View File

@@ -0,0 +1,375 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package slogtest implements support for testing implementations of log/slog.Handler.
package slogtest
import (
"context"
"errors"
"fmt"
"log/slog"
"reflect"
"runtime"
"testing"
"time"
)
type testCase struct {
// Subtest name.
name string
// If non-empty, explanation explains the violated constraint.
explanation string
// f executes a single log event using its argument logger.
// So that mkdescs.sh can generate the right description,
// the body of f must appear on a single line whose first
// non-whitespace characters are "l.".
f func(*slog.Logger)
// If mod is not nil, it is called to modify the Record
// generated by the Logger before it is passed to the Handler.
mod func(*slog.Record)
// checks is a list of checks to run on the result.
checks []check
}
var cases = []testCase{
{
name: "built-ins",
explanation: withSource("this test expects slog.TimeKey, slog.LevelKey and slog.MessageKey"),
f: func(l *slog.Logger) {
l.Info("message")
},
checks: []check{
hasKey(slog.TimeKey),
hasKey(slog.LevelKey),
hasAttr(slog.MessageKey, "message"),
},
},
{
name: "attrs",
explanation: withSource("a Handler should output attributes passed to the logging function"),
f: func(l *slog.Logger) {
l.Info("message", "k", "v")
},
checks: []check{
hasAttr("k", "v"),
},
},
{
name: "empty-attr",
explanation: withSource("a Handler should ignore an empty Attr"),
f: func(l *slog.Logger) {
l.Info("msg", "a", "b", "", nil, "c", "d")
},
checks: []check{
hasAttr("a", "b"),
missingKey(""),
hasAttr("c", "d"),
},
},
{
name: "zero-time",
explanation: withSource("a Handler should ignore a zero Record.Time"),
f: func(l *slog.Logger) {
l.Info("msg", "k", "v")
},
mod: func(r *slog.Record) { r.Time = time.Time{} },
checks: []check{
missingKey(slog.TimeKey),
},
},
{
name: "WithAttrs",
explanation: withSource("a Handler should include the attributes from the WithAttrs method"),
f: func(l *slog.Logger) {
l.With("a", "b").Info("msg", "k", "v")
},
checks: []check{
hasAttr("a", "b"),
hasAttr("k", "v"),
},
},
{
name: "groups",
explanation: withSource("a Handler should handle Group attributes"),
f: func(l *slog.Logger) {
l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f")
},
checks: []check{
hasAttr("a", "b"),
inGroup("G", hasAttr("c", "d")),
hasAttr("e", "f"),
},
},
{
name: "empty-group",
explanation: withSource("a Handler should ignore an empty group"),
f: func(l *slog.Logger) {
l.Info("msg", "a", "b", slog.Group("G"), "e", "f")
},
checks: []check{
hasAttr("a", "b"),
missingKey("G"),
hasAttr("e", "f"),
},
},
{
name: "inline-group",
explanation: withSource("a Handler should inline the Attrs of a group with an empty key"),
f: func(l *slog.Logger) {
l.Info("msg", "a", "b", slog.Group("", slog.String("c", "d")), "e", "f")
},
checks: []check{
hasAttr("a", "b"),
hasAttr("c", "d"),
hasAttr("e", "f"),
},
},
{
name: "WithGroup",
explanation: withSource("a Handler should handle the WithGroup method"),
f: func(l *slog.Logger) {
l.WithGroup("G").Info("msg", "a", "b")
},
checks: []check{
hasKey(slog.TimeKey),
hasKey(slog.LevelKey),
hasAttr(slog.MessageKey, "msg"),
missingKey("a"),
inGroup("G", hasAttr("a", "b")),
},
},
{
name: "multi-With",
explanation: withSource("a Handler should handle multiple WithGroup and WithAttr calls"),
f: func(l *slog.Logger) {
l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg", "e", "f")
},
checks: []check{
hasKey(slog.TimeKey),
hasKey(slog.LevelKey),
hasAttr(slog.MessageKey, "msg"),
hasAttr("a", "b"),
inGroup("G", hasAttr("c", "d")),
inGroup("G", inGroup("H", hasAttr("e", "f"))),
},
},
{
name: "empty-group-record",
explanation: withSource("a Handler should not output groups if there are no attributes"),
f: func(l *slog.Logger) {
l.With("a", "b").WithGroup("G").With("c", "d").WithGroup("H").Info("msg")
},
checks: []check{
hasKey(slog.TimeKey),
hasKey(slog.LevelKey),
hasAttr(slog.MessageKey, "msg"),
hasAttr("a", "b"),
inGroup("G", hasAttr("c", "d")),
inGroup("G", missingKey("H")),
},
},
{
name: "resolve",
explanation: withSource("a Handler should call Resolve on attribute values"),
f: func(l *slog.Logger) {
l.Info("msg", "k", &replace{"replaced"})
},
checks: []check{hasAttr("k", "replaced")},
},
{
name: "resolve-groups",
explanation: withSource("a Handler should call Resolve on attribute values in groups"),
f: func(l *slog.Logger) {
l.Info("msg",
slog.Group("G",
slog.String("a", "v1"),
slog.Any("b", &replace{"v2"})))
},
checks: []check{
inGroup("G", hasAttr("a", "v1")),
inGroup("G", hasAttr("b", "v2")),
},
},
{
name: "resolve-WithAttrs",
explanation: withSource("a Handler should call Resolve on attribute values from WithAttrs"),
f: func(l *slog.Logger) {
l = l.With("k", &replace{"replaced"})
l.Info("msg")
},
checks: []check{hasAttr("k", "replaced")},
},
{
name: "resolve-WithAttrs-groups",
explanation: withSource("a Handler should call Resolve on attribute values in groups from WithAttrs"),
f: func(l *slog.Logger) {
l = l.With(slog.Group("G",
slog.String("a", "v1"),
slog.Any("b", &replace{"v2"})))
l.Info("msg")
},
checks: []check{
inGroup("G", hasAttr("a", "v1")),
inGroup("G", hasAttr("b", "v2")),
},
},
{
name: "empty-PC",
explanation: withSource("a Handler should not output SourceKey if the PC is zero"),
f: func(l *slog.Logger) {
l.Info("message")
},
mod: func(r *slog.Record) { r.PC = 0 },
checks: []check{
missingKey(slog.SourceKey),
},
},
}
// TestHandler tests a [slog.Handler].
// If TestHandler finds any misbehaviors, it returns an error for each,
// combined into a single error with [errors.Join].
//
// TestHandler installs the given Handler in a [slog.Logger] and
// makes several calls to the Logger's output methods.
// The Handler should be enabled for levels Info and above.
//
// The results function is invoked after all such calls.
// It should return a slice of map[string]any, one for each call to a Logger output method.
// The keys and values of the map should correspond to the keys and values of the Handler's
// output. Each group in the output should be represented as its own nested map[string]any.
// The standard keys [slog.TimeKey], [slog.LevelKey] and [slog.MessageKey] should be used.
//
// If the Handler outputs JSON, then calling [encoding/json.Unmarshal] with a
// pointer to a map[string]any will create the right data structure.
//
// If a Handler intentionally drops an attribute that is checked by a test,
// then the results function should check for its absence and add it to the map it returns.
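//
// A minimal usage sketch (illustrative only; the JSON handler and the
// line-by-line parsing below are assumptions about the handler under test,
// not requirements of this package):
//
//	var buf bytes.Buffer
//	h := slog.NewJSONHandler(&buf, nil)
//	results := func() []map[string]any {
//		var ms []map[string]any
//		for _, line := range bytes.Split(buf.Bytes(), []byte{'\n'}) {
//			if len(line) == 0 {
//				continue
//			}
//			var m map[string]any
//			if err := json.Unmarshal(line, &m); err != nil {
//				t.Fatal(err) // t is the enclosing *testing.T
//			}
//			ms = append(ms, m)
//		}
//		return ms
//	}
//	if err := slogtest.TestHandler(h, results); err != nil {
//		t.Error(err)
//	}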
func TestHandler(h slog.Handler, results func() []map[string]any) error {
// Run the handler on the test cases.
for _, c := range cases {
ht := h
if c.mod != nil {
ht = &wrapper{h, c.mod}
}
l := slog.New(ht)
c.f(l)
}
// Collect and check the results.
var errs []error
res := results()
if g, w := len(res), len(cases); g != w {
return fmt.Errorf("got %d results, want %d", g, w)
}
for i, got := range results() {
c := cases[i]
for _, check := range c.checks {
if problem := check(got); problem != "" {
errs = append(errs, fmt.Errorf("%s: %s", problem, c.explanation))
}
}
}
return errors.Join(errs...)
}
// Run exercises a [slog.Handler] on the same test cases as [TestHandler], but
// runs each case in a subtest. For each test case, it first calls newHandler to
// get an instance of the handler under test, then runs the test case, then
// calls result to get the result. If the test case fails, it calls t.Error.
func Run(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any) {
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
h := newHandler(t)
if c.mod != nil {
h = &wrapper{h, c.mod}
}
l := slog.New(h)
c.f(l)
got := result(t)
for _, check := range c.checks {
if p := check(got); p != "" {
t.Errorf("%s: %s", p, c.explanation)
}
}
})
}
}
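// A check inspects a single result map produced by the handler under test.
// It returns a non-empty description of the problem if the map violates the
// property being checked, or the empty string if the map is acceptable.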
type check func(map[string]any) string
func hasKey(key string) check {
return func(m map[string]any) string {
if _, ok := m[key]; !ok {
return fmt.Sprintf("missing key %q", key)
}
return ""
}
}
func missingKey(key string) check {
return func(m map[string]any) string {
if _, ok := m[key]; ok {
return fmt.Sprintf("unexpected key %q", key)
}
return ""
}
}
func hasAttr(key string, wantVal any) check {
return func(m map[string]any) string {
if s := hasKey(key)(m); s != "" {
return s
}
gotVal := m[key]
if !reflect.DeepEqual(gotVal, wantVal) {
return fmt.Sprintf("%q: got %#v, want %#v", key, gotVal, wantVal)
}
return ""
}
}
func inGroup(name string, c check) check {
return func(m map[string]any) string {
v, ok := m[name]
if !ok {
return fmt.Sprintf("missing group %q", name)
}
g, ok := v.(map[string]any)
if !ok {
return fmt.Sprintf("value for group %q is not map[string]any", name)
}
return c(g)
}
}
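// wrapper delegates to an underlying Handler but lets a test case modify the
// Record (via mod) before it is handled.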
type wrapper struct {
slog.Handler
mod func(*slog.Record)
}
func (h *wrapper) Handle(ctx context.Context, r slog.Record) error {
h.mod(&r)
return h.Handler.Handle(ctx, r)
}
func withSource(s string) string {
_, file, line, ok := runtime.Caller(1)
if !ok {
panic("runtime.Caller failed")
}
return fmt.Sprintf("%s (%s:%d)", s, file, line)
}
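// replace is a [slog.LogValuer] whose LogValue resolves to the wrapped value.
// The resolve test cases use it to verify that a Handler calls Resolve on
// attribute values.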
type replace struct {
v any
}
func (r *replace) LogValue() slog.Value { return slog.AnyValue(r.v) }
func (r *replace) String() string {
return fmt.Sprintf("<replace(%v)>", r.v)
}

992
src/testing/sub_test.go Normal file

@@ -0,0 +1,992 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing
import (
"bytes"
"fmt"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
func init() {
// Make benchmark tests run 10x faster.
benchTime.d = 100 * time.Millisecond
}
func TestTestContext(t *T) {
const (
add1 = 0
done = 1
)
// After each of the calls is applied to the context, the resulting state
// (running, waiting, started) is checked against the fields recorded below.
type call struct {
typ int // run or done
// result from applying the call
running int
waiting int
started bool
}
testCases := []struct {
max int
run []call
}{{
max: 1,
run: []call{
{typ: add1, running: 1, waiting: 0, started: true},
{typ: done, running: 0, waiting: 0, started: false},
},
}, {
max: 1,
run: []call{
{typ: add1, running: 1, waiting: 0, started: true},
{typ: add1, running: 1, waiting: 1, started: false},
{typ: done, running: 1, waiting: 0, started: true},
{typ: done, running: 0, waiting: 0, started: false},
{typ: add1, running: 1, waiting: 0, started: true},
},
}, {
max: 3,
run: []call{
{typ: add1, running: 1, waiting: 0, started: true},
{typ: add1, running: 2, waiting: 0, started: true},
{typ: add1, running: 3, waiting: 0, started: true},
{typ: add1, running: 3, waiting: 1, started: false},
{typ: add1, running: 3, waiting: 2, started: false},
{typ: add1, running: 3, waiting: 3, started: false},
{typ: done, running: 3, waiting: 2, started: true},
{typ: add1, running: 3, waiting: 3, started: false},
{typ: done, running: 3, waiting: 2, started: true},
{typ: done, running: 3, waiting: 1, started: true},
{typ: done, running: 3, waiting: 0, started: true},
{typ: done, running: 2, waiting: 0, started: false},
{typ: done, running: 1, waiting: 0, started: false},
{typ: done, running: 0, waiting: 0, started: false},
},
}}
for i, tc := range testCases {
ctx := &testContext{
startParallel: make(chan bool),
maxParallel: tc.max,
}
for j, call := range tc.run {
doCall := func(f func()) chan bool {
done := make(chan bool)
go func() {
f()
done <- true
}()
return done
}
started := false
switch call.typ {
case add1:
signal := doCall(ctx.waitParallel)
select {
case <-signal:
started = true
case ctx.startParallel <- true:
<-signal
}
case done:
signal := doCall(ctx.release)
select {
case <-signal:
case <-ctx.startParallel:
started = true
<-signal
}
}
if started != call.started {
t.Errorf("%d:%d:started: got %v; want %v", i, j, started, call.started)
}
if ctx.running != call.running {
t.Errorf("%d:%d:running: got %v; want %v", i, j, ctx.running, call.running)
}
if ctx.numWaiting != call.waiting {
t.Errorf("%d:%d:waiting: got %v; want %v", i, j, ctx.numWaiting, call.waiting)
}
}
}
}
func TestTRun(t *T) {
realTest := t
testCases := []struct {
desc string
ok bool
maxPar int
chatty bool
json bool
output string
f func(*T)
}{{
desc: "failnow skips future sequential and parallel tests at same level",
ok: false,
maxPar: 1,
output: `
--- FAIL: failnow skips future sequential and parallel tests at same level (N.NNs)
--- FAIL: failnow skips future sequential and parallel tests at same level/#00 (N.NNs)
`,
f: func(t *T) {
ranSeq := false
ranPar := false
t.Run("", func(t *T) {
t.Run("par", func(t *T) {
t.Parallel()
ranPar = true
})
t.Run("seq", func(t *T) {
ranSeq = true
})
t.FailNow()
t.Run("seq", func(t *T) {
realTest.Error("test must be skipped")
})
t.Run("par", func(t *T) {
t.Parallel()
realTest.Error("test must be skipped.")
})
})
if !ranPar {
realTest.Error("parallel test was not run")
}
if !ranSeq {
realTest.Error("sequential test was not run")
}
},
}, {
desc: "failure in parallel test propagates upwards",
ok: false,
maxPar: 1,
output: `
--- FAIL: failure in parallel test propagates upwards (N.NNs)
--- FAIL: failure in parallel test propagates upwards/#00 (N.NNs)
--- FAIL: failure in parallel test propagates upwards/#00/par (N.NNs)
`,
f: func(t *T) {
t.Run("", func(t *T) {
t.Parallel()
t.Run("par", func(t *T) {
t.Parallel()
t.Fail()
})
})
},
}, {
desc: "skipping without message, chatty",
ok: true,
chatty: true,
output: `
=== RUN skipping without message, chatty
--- SKIP: skipping without message, chatty (N.NNs)`,
f: func(t *T) { t.SkipNow() },
}, {
desc: "chatty with recursion",
ok: true,
chatty: true,
output: `
=== RUN chatty with recursion
=== RUN chatty with recursion/#00
=== RUN chatty with recursion/#00/#00
--- PASS: chatty with recursion (N.NNs)
--- PASS: chatty with recursion/#00 (N.NNs)
--- PASS: chatty with recursion/#00/#00 (N.NNs)`,
f: func(t *T) {
t.Run("", func(t *T) {
t.Run("", func(t *T) {})
})
},
}, {
desc: "chatty with recursion and json",
ok: false,
chatty: true,
json: true,
output: `
^V=== RUN chatty with recursion and json
^V=== RUN chatty with recursion and json/#00
^V=== RUN chatty with recursion and json/#00/#00
^V--- PASS: chatty with recursion and json/#00/#00 (N.NNs)
^V=== NAME chatty with recursion and json/#00
^V=== RUN chatty with recursion and json/#00/#01
sub_test.go:NNN: skip
^V--- SKIP: chatty with recursion and json/#00/#01 (N.NNs)
^V=== NAME chatty with recursion and json/#00
^V=== RUN chatty with recursion and json/#00/#02
sub_test.go:NNN: fail
^V--- FAIL: chatty with recursion and json/#00/#02 (N.NNs)
^V=== NAME chatty with recursion and json/#00
^V--- FAIL: chatty with recursion and json/#00 (N.NNs)
^V=== NAME chatty with recursion and json
^V--- FAIL: chatty with recursion and json (N.NNs)
^V=== NAME `,
f: func(t *T) {
t.Run("", func(t *T) {
t.Run("", func(t *T) {})
t.Run("", func(t *T) { t.Skip("skip") })
t.Run("", func(t *T) { t.Fatal("fail") })
})
},
}, {
desc: "skipping without message, not chatty",
ok: true,
f: func(t *T) { t.SkipNow() },
}, {
desc: "skipping after error",
output: `
--- FAIL: skipping after error (N.NNs)
sub_test.go:NNN: an error
sub_test.go:NNN: skipped`,
f: func(t *T) {
t.Error("an error")
t.Skip("skipped")
},
}, {
desc: "use Run to locally synchronize parallelism",
ok: true,
maxPar: 1,
f: func(t *T) {
var count uint32
t.Run("waitGroup", func(t *T) {
for i := 0; i < 4; i++ {
t.Run("par", func(t *T) {
t.Parallel()
atomic.AddUint32(&count, 1)
})
}
})
if count != 4 {
t.Errorf("count was %d; want 4", count)
}
},
}, {
desc: "alternate sequential and parallel",
// Sequential tests should partake in the counting of running threads.
// Otherwise, if one runs parallel subtests inside sequential tests that are
// themselves subtests of parallel tests, the counts can become skewed.
ok: true,
maxPar: 1,
f: func(t *T) {
t.Run("a", func(t *T) {
t.Parallel()
t.Run("b", func(t *T) {
// Sequential: ensure running count is decremented.
t.Run("c", func(t *T) {
t.Parallel()
})
})
})
},
}, {
desc: "alternate sequential and parallel 2",
// Sequential tests should partake in the counting of running threads.
// Otherwise, if one runs parallel subtests inside sequential tests that are
// themselves subtests of parallel tests, the counts can become skewed.
ok: true,
maxPar: 2,
f: func(t *T) {
for i := 0; i < 2; i++ {
t.Run("a", func(t *T) {
t.Parallel()
time.Sleep(time.Nanosecond)
for i := 0; i < 2; i++ {
t.Run("b", func(t *T) {
time.Sleep(time.Nanosecond)
for i := 0; i < 2; i++ {
t.Run("c", func(t *T) {
t.Parallel()
time.Sleep(time.Nanosecond)
})
}
})
}
})
}
},
}, {
desc: "stress test",
ok: true,
maxPar: 4,
f: func(t *T) {
t.Parallel()
for i := 0; i < 12; i++ {
t.Run("a", func(t *T) {
t.Parallel()
time.Sleep(time.Nanosecond)
for i := 0; i < 12; i++ {
t.Run("b", func(t *T) {
time.Sleep(time.Nanosecond)
for i := 0; i < 12; i++ {
t.Run("c", func(t *T) {
t.Parallel()
time.Sleep(time.Nanosecond)
t.Run("d1", func(t *T) {})
t.Run("d2", func(t *T) {})
t.Run("d3", func(t *T) {})
t.Run("d4", func(t *T) {})
})
}
})
}
})
}
},
}, {
desc: "skip output",
ok: true,
maxPar: 4,
f: func(t *T) {
t.Skip()
},
}, {
desc: "subtest calls error on parent",
ok: false,
output: `
--- FAIL: subtest calls error on parent (N.NNs)
sub_test.go:NNN: first this
sub_test.go:NNN: and now this!
sub_test.go:NNN: oh, and this too`,
maxPar: 1,
f: func(t *T) {
t.Errorf("first this")
outer := t
t.Run("", func(t *T) {
outer.Errorf("and now this!")
})
t.Errorf("oh, and this too")
},
}, {
desc: "subtest calls fatal on parent",
ok: false,
output: `
--- FAIL: subtest calls fatal on parent (N.NNs)
sub_test.go:NNN: first this
sub_test.go:NNN: and now this!
--- FAIL: subtest calls fatal on parent/#00 (N.NNs)
testing.go:NNN: test executed panic(nil) or runtime.Goexit: subtest may have called FailNow on a parent test`,
maxPar: 1,
f: func(t *T) {
outer := t
t.Errorf("first this")
t.Run("", func(t *T) {
outer.Fatalf("and now this!")
})
t.Errorf("Should not reach here.")
},
}, {
desc: "subtest calls error on ancestor",
ok: false,
output: `
--- FAIL: subtest calls error on ancestor (N.NNs)
sub_test.go:NNN: Report to ancestor
--- FAIL: subtest calls error on ancestor/#00 (N.NNs)
sub_test.go:NNN: Still do this
sub_test.go:NNN: Also do this`,
maxPar: 1,
f: func(t *T) {
outer := t
t.Run("", func(t *T) {
t.Run("", func(t *T) {
outer.Errorf("Report to ancestor")
})
t.Errorf("Still do this")
})
t.Errorf("Also do this")
},
}, {
desc: "subtest calls fatal on ancestor",
ok: false,
output: `
--- FAIL: subtest calls fatal on ancestor (N.NNs)
sub_test.go:NNN: Nope`,
maxPar: 1,
f: func(t *T) {
outer := t
t.Run("", func(t *T) {
for i := 0; i < 4; i++ {
t.Run("", func(t *T) {
outer.Fatalf("Nope")
})
t.Errorf("Don't do this")
}
t.Errorf("And neither do this")
})
t.Errorf("Nor this")
},
}, {
desc: "panic on goroutine fail after test exit",
ok: false,
maxPar: 4,
f: func(t *T) {
ch := make(chan bool)
t.Run("", func(t *T) {
go func() {
<-ch
defer func() {
if r := recover(); r == nil {
realTest.Errorf("expected panic")
}
ch <- true
}()
t.Errorf("failed after success")
}()
})
ch <- true
<-ch
},
}, {
desc: "log in finished sub test logs to parent",
ok: false,
output: `
--- FAIL: log in finished sub test logs to parent (N.NNs)
sub_test.go:NNN: message2
sub_test.go:NNN: message1
sub_test.go:NNN: error`,
maxPar: 1,
f: func(t *T) {
ch := make(chan bool)
t.Run("sub", func(t2 *T) {
go func() {
<-ch
t2.Log("message1")
ch <- true
}()
})
t.Log("message2")
ch <- true
<-ch
t.Errorf("error")
},
}, {
// A chatty test should always log with fmt.Print, even if the
// parent test has completed.
desc: "log in finished sub test with chatty",
ok: false,
chatty: true,
output: `
--- FAIL: log in finished sub test with chatty (N.NNs)`,
maxPar: 1,
f: func(t *T) {
ch := make(chan bool)
t.Run("sub", func(t2 *T) {
go func() {
<-ch
t2.Log("message1")
ch <- true
}()
})
t.Log("message2")
ch <- true
<-ch
t.Errorf("error")
},
}, {
// If a subtest panics we should run cleanups.
desc: "cleanup when subtest panics",
ok: false,
chatty: false,
output: `
--- FAIL: cleanup when subtest panics (N.NNs)
--- FAIL: cleanup when subtest panics/sub (N.NNs)
sub_test.go:NNN: running cleanup`,
f: func(t *T) {
t.Cleanup(func() { t.Log("running cleanup") })
t.Run("sub", func(t2 *T) {
t2.FailNow()
})
},
}}
for _, tc := range testCases {
t.Run(tc.desc, func(t *T) {
ctx := newTestContext(tc.maxPar, allMatcher())
buf := &strings.Builder{}
root := &T{
common: common{
signal: make(chan bool),
barrier: make(chan bool),
name: "",
w: buf,
},
context: ctx,
}
if tc.chatty {
root.chatty = newChattyPrinter(root.w)
root.chatty.json = tc.json
}
ok := root.Run(tc.desc, tc.f)
ctx.release()
if ok != tc.ok {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, tc.ok)
}
if ok != !root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
if ctx.running != 0 || ctx.numWaiting != 0 {
t.Errorf("%s:running and waiting non-zero: got %d and %d", tc.desc, ctx.running, ctx.numWaiting)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
})
}
}
func TestBRun(t *T) {
work := func(b *B) {
for i := 0; i < b.N; i++ {
time.Sleep(time.Nanosecond)
}
}
testCases := []struct {
desc string
failed bool
chatty bool
output string
f func(*B)
}{{
desc: "simulate sequential run of subbenchmarks.",
f: func(b *B) {
b.Run("", func(b *B) { work(b) })
time1 := b.result.NsPerOp()
b.Run("", func(b *B) { work(b) })
time2 := b.result.NsPerOp()
if time1 >= time2 {
t.Errorf("no time spent in benchmark t1 >= t2 (%d >= %d)", time1, time2)
}
},
}, {
desc: "bytes set by all benchmarks",
f: func(b *B) {
b.Run("", func(b *B) { b.SetBytes(10); work(b) })
b.Run("", func(b *B) { b.SetBytes(10); work(b) })
if b.result.Bytes != 20 {
t.Errorf("bytes: got: %d; want 20", b.result.Bytes)
}
},
}, {
desc: "bytes set by some benchmarks",
// In this case the bytes result is meaningless, so it must be 0.
f: func(b *B) {
b.Run("", func(b *B) { b.SetBytes(10); work(b) })
b.Run("", func(b *B) { work(b) })
b.Run("", func(b *B) { b.SetBytes(10); work(b) })
if b.result.Bytes != 0 {
t.Errorf("bytes: got: %d; want 0", b.result.Bytes)
}
},
}, {
desc: "failure carried over to root",
failed: true,
output: "--- FAIL: root",
f: func(b *B) { b.Fail() },
}, {
desc: "skipping without message, chatty",
chatty: true,
output: "--- SKIP: root",
f: func(b *B) { b.SkipNow() },
}, {
desc: "chatty with recursion",
chatty: true,
f: func(b *B) {
b.Run("", func(b *B) {
b.Run("", func(b *B) {})
})
},
}, {
desc: "skipping without message, not chatty",
f: func(b *B) { b.SkipNow() },
}, {
desc: "skipping after error",
failed: true,
output: `
--- FAIL: root
sub_test.go:NNN: an error
sub_test.go:NNN: skipped`,
f: func(b *B) {
b.Error("an error")
b.Skip("skipped")
},
}, {
desc: "memory allocation",
f: func(b *B) {
const bufSize = 256
alloc := func(b *B) {
var buf [bufSize]byte
for i := 0; i < b.N; i++ {
_ = append([]byte(nil), buf[:]...)
}
}
b.Run("", func(b *B) {
alloc(b)
b.ReportAllocs()
})
b.Run("", func(b *B) {
alloc(b)
b.ReportAllocs()
})
// runtime.MemStats sometimes reports more allocations than the
// benchmark is responsible for. Luckily the point of this test is
// to ensure that the results are not underreported, so we can
// simply verify the lower bound.
if got := b.result.MemAllocs; got < 2 {
t.Errorf("MemAllocs was %v; want 2", got)
}
if got := b.result.MemBytes; got < 2*bufSize {
t.Errorf("MemBytes was %v; want %v", got, 2*bufSize)
}
},
}, {
desc: "cleanup is called",
f: func(b *B) {
var calls, cleanups, innerCalls, innerCleanups int
b.Run("", func(b *B) {
calls++
b.Cleanup(func() {
cleanups++
})
b.Run("", func(b *B) {
b.Cleanup(func() {
innerCleanups++
})
innerCalls++
})
work(b)
})
if calls == 0 || calls != cleanups {
t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
}
if innerCalls == 0 || innerCalls != innerCleanups {
t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
}
},
}, {
desc: "cleanup is called on failure",
failed: true,
f: func(b *B) {
var calls, cleanups int
b.Run("", func(b *B) {
calls++
b.Cleanup(func() {
cleanups++
})
b.Fatalf("failure")
})
if calls == 0 || calls != cleanups {
t.Errorf("mismatched cleanups; got %d want %d", cleanups, calls)
}
},
}}
hideStdoutForTesting = true
defer func() {
hideStdoutForTesting = false
}()
for _, tc := range testCases {
t.Run(tc.desc, func(t *T) {
var ok bool
buf := &strings.Builder{}
// This is almost like the Benchmark function, except that we override
// the benchtime and catch the failure result of the subbenchmark.
root := &B{
common: common{
signal: make(chan bool),
name: "root",
w: buf,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
benchTime: durationOrCountFlag{d: 1 * time.Microsecond},
}
if tc.chatty {
root.chatty = newChattyPrinter(root.w)
}
root.runN(1)
if ok != !tc.failed {
t.Errorf("%s:ok: got %v; want %v", tc.desc, ok, !tc.failed)
}
if !ok != root.Failed() {
t.Errorf("%s:root failed: got %v; want %v", tc.desc, !ok, root.Failed())
}
// All tests are run as subtests
if root.result.N != 1 {
t.Errorf("%s: N for parent benchmark was %d; want 1", tc.desc, root.result.N)
}
got := strings.TrimSpace(buf.String())
want := strings.TrimSpace(tc.output)
re := makeRegexp(want)
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("%s:output:\ngot:\n%s\nwant:\n%s", tc.desc, got, want)
}
})
}
}
func makeRegexp(s string) string {
s = regexp.QuoteMeta(s)
s = strings.ReplaceAll(s, "^V", "\x16")
s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d\d?:`)
s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`)
return s
}
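// TestMakeRegexpPlaceholders is a minimal illustrative sketch (not part of
// the upstream suite) showing how the N.NNs and NNN placeholders used in the
// expected-output strings above are turned into regular expressions by
// makeRegexp.
func TestMakeRegexpPlaceholders(t *T) {
re := makeRegexp("--- FAIL: x (N.NNs)\n    sub_test.go:NNN: msg")
got := "--- FAIL: x (0.03s)\n    sub_test.go:123: msg"
if ok, err := regexp.MatchString(re, got); !ok || err != nil {
t.Errorf("pattern %q did not match %q (err = %v)", re, got, err)
}
}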
func TestBenchmarkOutput(t *T) {
// Ensure Benchmark initializes common.w by invoking it with both an error
// case and a normal case.
Benchmark(func(b *B) { b.Error("do not print this output") })
Benchmark(func(b *B) {})
}
func TestBenchmarkStartsFrom1(t *T) {
var first = true
Benchmark(func(b *B) {
if first && b.N != 1 {
panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N))
}
first = false
})
}
func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) {
var first = true
Benchmark(func(b *B) {
if first && (b.startAllocs == 0 || b.startBytes == 0) {
panic("ReadMemStats not called before first run")
}
first = false
})
}
type funcWriter struct {
write func([]byte) (int, error)
}
func (fw *funcWriter) Write(b []byte) (int, error) {
return fw.write(b)
}
func TestRacyOutput(t *T) {
var runs int32 // The number of running Writes
var races int32 // Incremented for each race detected
raceDetector := func(b []byte) (int, error) {
// Check if some other goroutine is concurrently calling Write.
if atomic.LoadInt32(&runs) > 0 {
atomic.AddInt32(&races, 1) // Race detected!
}
atomic.AddInt32(&runs, 1)
defer atomic.AddInt32(&runs, -1)
runtime.Gosched() // Increase probability of a race
return len(b), nil
}
root := &T{
common: common{w: &funcWriter{raceDetector}},
context: newTestContext(1, allMatcher()),
}
root.chatty = newChattyPrinter(root.w)
root.Run("", func(t *T) {
var wg sync.WaitGroup
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
t.Run(fmt.Sprint(i), func(t *T) {
t.Logf("testing run %d", i)
})
}(i)
}
wg.Wait()
})
if races > 0 {
t.Errorf("detected %d racy Writes", races)
}
}
// The late log message did not include the test name. Issue 29388.
func TestLogAfterComplete(t *T) {
ctx := newTestContext(1, allMatcher())
var buf bytes.Buffer
t1 := &T{
common: common{
// Use a buffered channel so that tRunner can write
// to it although nothing is reading from it.
signal: make(chan bool, 1),
w: &buf,
},
context: ctx,
}
c1 := make(chan bool)
c2 := make(chan string)
tRunner(t1, func(t *T) {
t.Run("TestLateLog", func(t *T) {
go func() {
defer close(c2)
defer func() {
p := recover()
if p == nil {
c2 <- "subtest did not panic"
return
}
s, ok := p.(string)
if !ok {
c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p)
return
}
const want = "Log in goroutine after TestLateLog has completed: log after test"
if !strings.Contains(s, want) {
c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want)
}
}()
<-c1
t.Log("log after test")
}()
})
})
close(c1)
if s := <-c2; s != "" {
t.Error(s)
}
}
func TestBenchmark(t *T) {
if Short() {
t.Skip("skipping in short mode")
}
res := Benchmark(func(b *B) {
for i := 0; i < 5; i++ {
b.Run("", func(b *B) {
for i := 0; i < b.N; i++ {
time.Sleep(time.Millisecond)
}
})
}
})
if res.NsPerOp() < 4000000 {
t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp()))
}
}
func TestCleanup(t *T) {
var cleanups []int
t.Run("test", func(t *T) {
t.Cleanup(func() { cleanups = append(cleanups, 1) })
t.Cleanup(func() { cleanups = append(cleanups, 2) })
})
if got, want := cleanups, []int{2, 1}; !reflect.DeepEqual(got, want) {
t.Errorf("unexpected cleanup record; got %v want %v", got, want)
}
}
func TestConcurrentCleanup(t *T) {
cleanups := 0
t.Run("test", func(t *T) {
var wg sync.WaitGroup
wg.Add(2)
for i := 0; i < 2; i++ {
i := i
go func() {
t.Cleanup(func() {
// Although the calls to Cleanup are concurrent, the functions passed
// to Cleanup should be called sequentially, in some nondeterministic
// order based on when the Cleanup calls happened to be scheduled.
// So these assignments to the cleanups variable should not race.
cleanups |= 1 << i
})
wg.Done()
}()
}
wg.Wait()
})
if cleanups != 1|2 {
t.Errorf("unexpected cleanup; got %d want 3", cleanups)
}
}
func TestCleanupCalledEvenAfterGoexit(t *T) {
cleanups := 0
t.Run("test", func(t *T) {
t.Cleanup(func() {
cleanups++
})
t.Cleanup(func() {
runtime.Goexit()
})
})
if cleanups != 1 {
t.Errorf("unexpected cleanup count; got %d want 1", cleanups)
}
}
func TestRunCleanup(t *T) {
outerCleanup := 0
innerCleanup := 0
t.Run("test", func(t *T) {
t.Cleanup(func() { outerCleanup++ })
t.Run("x", func(t *T) {
t.Cleanup(func() { innerCleanup++ })
})
})
if innerCleanup != 1 {
t.Errorf("unexpected inner cleanup count; got %d want 1", innerCleanup)
}
if outerCleanup != 1 {
t.Errorf("unexpected outer cleanup count; got %d want 0", outerCleanup)
}
}
func TestCleanupParallelSubtests(t *T) {
ranCleanup := 0
t.Run("test", func(t *T) {
t.Cleanup(func() { ranCleanup++ })
t.Run("x", func(t *T) {
t.Parallel()
if ranCleanup > 0 {
t.Error("outer cleanup ran before parallel subtest")
}
})
})
if ranCleanup != 1 {
t.Errorf("unexpected cleanup count; got %d want 1", ranCleanup)
}
}
func TestNestedCleanup(t *T) {
ranCleanup := 0
t.Run("test", func(t *T) {
t.Cleanup(func() {
if ranCleanup != 2 {
t.Errorf("unexpected cleanup count in first cleanup: got %d want 2", ranCleanup)
}
ranCleanup++
})
t.Cleanup(func() {
if ranCleanup != 0 {
t.Errorf("unexpected cleanup count in second cleanup: got %d want 0", ranCleanup)
}
ranCleanup++
t.Cleanup(func() {
if ranCleanup != 1 {
t.Errorf("unexpected cleanup count in nested cleanup: got %d want 1", ranCleanup)
}
ranCleanup++
})
})
})
if ranCleanup != 3 {
t.Errorf("unexpected cleanup count: got %d want 3", ranCleanup)
}
}

2416
src/testing/testing.go Normal file

File diff suppressed because it is too large


@@ -0,0 +1,31 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows
package testing
import "time"
// isWindowsRetryable reports whether err is a Windows error code
// that may be fixed by retrying a failed filesystem operation.
func isWindowsRetryable(err error) bool {
return false
}
// highPrecisionTime represents a single point in time.
// On all systems except Windows, using time.Time is fine.
type highPrecisionTime struct {
now time.Time
}
// highPrecisionTimeNow returns high precision time for benchmarking.
func highPrecisionTimeNow() highPrecisionTime {
return highPrecisionTime{now: time.Now()}
}
// highPrecisionTimeSince returns duration since b.
func highPrecisionTimeSince(b highPrecisionTime) time.Duration {
return time.Since(b.now)
}

815
src/testing/testing_test.go Normal file

@@ -0,0 +1,815 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"bytes"
"fmt"
"internal/race"
"internal/testenv"
"os"
"os/exec"
"path/filepath"
"regexp"
"slices"
"strings"
"sync"
"testing"
"time"
)
// This is exactly what a test would do without a TestMain.
// It's here only so that there is at least one package in the
// standard library with a TestMain, so that code is executed.
func TestMain(m *testing.M) {
if os.Getenv("GO_WANT_RACE_BEFORE_TESTS") == "1" {
doRace()
}
m.Run()
// Note: m.Run currently prints the final "PASS" line, so if any race is
// reported here (after m.Run but before the process exits), it will print
// "PASS", then print the stack traces for the race, then exit with nonzero
// status.
//
// This is a somewhat fundamental race: because the race detector hooks into
// the runtime at a very low level, no matter where we put the printing it
// would be possible to report a race that occurs afterward. However, we could
// theoretically move the printing after TestMain, which would at least do a
// better job of diagnosing races in cleanup functions within TestMain itself.
}
func TestTempDirInCleanup(t *testing.T) {
var dir string
t.Run("test", func(t *testing.T) {
t.Cleanup(func() {
dir = t.TempDir()
})
_ = t.TempDir()
})
fi, err := os.Stat(dir)
if fi != nil {
t.Fatalf("Directory %q from user Cleanup still exists", dir)
}
if !os.IsNotExist(err) {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestTempDirInBenchmark(t *testing.T) {
testing.Benchmark(func(b *testing.B) {
if !b.Run("test", func(b *testing.B) {
// Add a loop so that the test won't fail. See issue 38677.
for i := 0; i < b.N; i++ {
_ = b.TempDir()
}
}) {
t.Fatal("Sub test failure in a benchmark")
}
})
}
func TestTempDir(t *testing.T) {
testTempDir(t)
t.Run("InSubtest", testTempDir)
t.Run("test/subtest", testTempDir)
t.Run("test\\subtest", testTempDir)
t.Run("test:subtest", testTempDir)
t.Run("test/..", testTempDir)
t.Run("../test", testTempDir)
t.Run("test[]", testTempDir)
t.Run("test*", testTempDir)
t.Run("äöüéè", testTempDir)
}
func testTempDir(t *testing.T) {
dirCh := make(chan string, 1)
t.Cleanup(func() {
// Verify directory has been removed.
select {
case dir := <-dirCh:
fi, err := os.Stat(dir)
if os.IsNotExist(err) {
// All good
return
}
if err != nil {
t.Fatal(err)
}
t.Errorf("directory %q still exists: %v, isDir=%v", dir, fi, fi.IsDir())
default:
if !t.Failed() {
t.Fatal("never received dir channel")
}
}
})
dir := t.TempDir()
if dir == "" {
t.Fatal("expected dir")
}
dir2 := t.TempDir()
if dir == dir2 {
t.Fatal("subsequent calls to TempDir returned the same directory")
}
if filepath.Dir(dir) != filepath.Dir(dir2) {
t.Fatalf("calls to TempDir do not share a parent; got %q, %q", dir, dir2)
}
dirCh <- dir
fi, err := os.Stat(dir)
if err != nil {
t.Fatal(err)
}
if !fi.IsDir() {
t.Errorf("dir %q is not a dir", dir)
}
files, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
if len(files) > 0 {
t.Errorf("unexpected %d files in TempDir: %v", len(files), files)
}
glob := filepath.Join(dir, "*.txt")
if _, err := filepath.Glob(glob); err != nil {
t.Error(err)
}
}
func TestSetenv(t *testing.T) {
tests := []struct {
name string
key string
initialValueExists bool
initialValue string
newValue string
}{
{
name: "initial value exists",
key: "GO_TEST_KEY_1",
initialValueExists: true,
initialValue: "111",
newValue: "222",
},
{
name: "initial value exists but empty",
key: "GO_TEST_KEY_2",
initialValueExists: true,
initialValue: "",
newValue: "222",
},
{
name: "initial value does not exist",
key: "GO_TEST_KEY_3",
initialValueExists: false,
initialValue: "",
newValue: "222",
},
}
for _, test := range tests {
if test.initialValueExists {
if err := os.Setenv(test.key, test.initialValue); err != nil {
t.Fatalf("unable to set env: got %v", err)
}
} else {
os.Unsetenv(test.key)
}
t.Run(test.name, func(t *testing.T) {
t.Setenv(test.key, test.newValue)
if os.Getenv(test.key) != test.newValue {
t.Fatalf("unexpected value after t.Setenv: got %s, want %s", os.Getenv(test.key), test.newValue)
}
})
got, exists := os.LookupEnv(test.key)
if got != test.initialValue {
t.Fatalf("unexpected value after t.Setenv cleanup: got %s, want %s", got, test.initialValue)
}
if exists != test.initialValueExists {
t.Fatalf("unexpected value after t.Setenv cleanup: got %t, want %t", exists, test.initialValueExists)
}
}
}
func TestSetenvWithParallelAfterSetenv(t *testing.T) {
defer func() {
want := "testing: t.Parallel called after t.Setenv; cannot set environment variables in parallel tests"
if got := recover(); got != want {
t.Fatalf("expected panic; got %#v want %q", got, want)
}
}()
t.Setenv("GO_TEST_KEY_1", "value")
t.Parallel()
}
func TestSetenvWithParallelBeforeSetenv(t *testing.T) {
defer func() {
want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
if got := recover(); got != want {
t.Fatalf("expected panic; got %#v want %q", got, want)
}
}()
t.Parallel()
t.Setenv("GO_TEST_KEY_1", "value")
}
func TestSetenvWithParallelParentBeforeSetenv(t *testing.T) {
t.Parallel()
t.Run("child", func(t *testing.T) {
defer func() {
want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
if got := recover(); got != want {
t.Fatalf("expected panic; got %#v want %q", got, want)
}
}()
t.Setenv("GO_TEST_KEY_1", "value")
})
}
func TestSetenvWithParallelGrandParentBeforeSetenv(t *testing.T) {
t.Parallel()
t.Run("child", func(t *testing.T) {
t.Run("grand-child", func(t *testing.T) {
defer func() {
want := "testing: t.Setenv called after t.Parallel; cannot set environment variables in parallel tests"
if got := recover(); got != want {
t.Fatalf("expected panic; got %#v want %q", got, want)
}
}()
t.Setenv("GO_TEST_KEY_1", "value")
})
})
}
// testingTrueInInit is part of TestTesting.
var testingTrueInInit = false
// testingTrueInPackageVarInit is part of TestTesting.
var testingTrueInPackageVarInit = testing.Testing()
// init is part of TestTesting.
func init() {
if testing.Testing() {
testingTrueInInit = true
}
}
var testingProg = `
package main
import (
"fmt"
"testing"
)
func main() {
fmt.Println(testing.Testing())
}
`
func TestTesting(t *testing.T) {
if !testing.Testing() {
t.Errorf("testing.Testing() == %t, want %t", testing.Testing(), true)
}
if !testingTrueInInit {
t.Errorf("testing.Testing() called by init function == %t, want %t", testingTrueInInit, true)
}
if !testingTrueInPackageVarInit {
t.Errorf("testing.Testing() variable initialized as %t, want %t", testingTrueInPackageVarInit, true)
}
if testing.Short() {
t.Skip("skipping building a binary in short mode")
}
testenv.MustHaveGoRun(t)
fn := filepath.Join(t.TempDir(), "x.go")
if err := os.WriteFile(fn, []byte(testingProg), 0644); err != nil {
t.Fatal(err)
}
cmd := testenv.Command(t, testenv.GoToolPath(t), "run", fn)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("%v failed: %v\n%s", cmd, err, out)
}
s := string(bytes.TrimSpace(out))
if s != "false" {
t.Errorf("in non-test testing.Test() returned %q, want %q", s, "false")
}
}
// runTest runs a helper test with -test.v, ignoring its exit status.
// runTest both logs and returns the test output.
func runTest(t *testing.T, test string) []byte {
t.Helper()
testenv.MustHaveExec(t)
exe, err := os.Executable()
if err != nil {
t.Skipf("can't find test executable: %v", err)
}
cmd := testenv.Command(t, exe, "-test.run=^"+test+"$", "-test.bench="+test, "-test.v", "-test.parallel=2", "-test.benchtime=2x")
cmd = testenv.CleanCmdEnv(cmd)
cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1")
out, err := cmd.CombinedOutput()
t.Logf("%v: %v\n%s", cmd, err, out)
return out
}
// doRace provokes a data race that generates a race detector report if run
// under the race detector and is otherwise benign.
func doRace() {
var x int
c1 := make(chan bool)
go func() {
x = 1 // racy write
c1 <- true
}()
_ = x // racy read
<-c1
}
func TestRaceReports(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
// Generate a race detector report in a sub test.
t.Run("Sub", func(t *testing.T) {
doRace()
})
return
}
out := runTest(t, "TestRaceReports")
// We should see at most one race detector report.
c := bytes.Count(out, []byte("race detected"))
want := 0
if race.Enabled {
want = 1
}
if c != want {
t.Errorf("got %d race reports, want %d", c, want)
}
}
// Issue #60083. This used to fail on the race builder.
func TestRaceName(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
doRace()
return
}
out := runTest(t, "TestRaceName")
if regexp.MustCompile(`=== NAME\s*$`).Match(out) {
t.Errorf("incorrectly reported test with no name")
}
}
func TestRaceSubReports(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
t.Parallel()
c1 := make(chan bool, 1)
t.Run("sub", func(t *testing.T) {
t.Run("subsub1", func(t *testing.T) {
t.Parallel()
doRace()
c1 <- true
})
t.Run("subsub2", func(t *testing.T) {
t.Parallel()
doRace()
<-c1
})
})
doRace()
return
}
out := runTest(t, "TestRaceSubReports")
// There should be three race reports: one for each subtest, and one for the
// race after the subtests complete. Note that because the subtests run in
// parallel, the race stacks may both end up printed alongside one or the
// other subtest's logs.
cReport := bytes.Count(out, []byte("race detected during execution of test"))
wantReport := 0
if race.Enabled {
wantReport = 3
}
if cReport != wantReport {
t.Errorf("got %d race reports, want %d", cReport, wantReport)
}
// Regardless of when the stacks are printed, we expect each subtest to be
// marked as failed, and that failure should propagate up to the parents.
cFail := bytes.Count(out, []byte("--- FAIL:"))
wantFail := 0
if race.Enabled {
wantFail = 4
}
if cFail != wantFail {
t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
}
}
func TestRaceInCleanup(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
t.Cleanup(doRace)
t.Parallel()
t.Run("sub", func(t *testing.T) {
t.Parallel()
// No race should be reported for sub.
})
return
}
out := runTest(t, "TestRaceInCleanup")
// There should be one race report, for the parent test only.
cReport := bytes.Count(out, []byte("race detected during execution of test"))
wantReport := 0
if race.Enabled {
wantReport = 1
}
if cReport != wantReport {
t.Errorf("got %d race reports, want %d", cReport, wantReport)
}
// Only the parent test should be marked as failed.
// (The subtest does not race, and should pass.)
cFail := bytes.Count(out, []byte("--- FAIL:"))
wantFail := 0
if race.Enabled {
wantFail = 1
}
if cFail != wantFail {
t.Errorf(`got %d "--- FAIL:" lines, want %d`, cFail, wantFail)
}
}
func TestDeepSubtestRace(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
t.Run("sub", func(t *testing.T) {
t.Run("subsub", func(t *testing.T) {
t.Run("subsubsub", func(t *testing.T) {
doRace()
})
})
doRace()
})
return
}
out := runTest(t, "TestDeepSubtestRace")
c := bytes.Count(out, []byte("race detected during execution of test"))
want := 0
// There should be two race reports.
if race.Enabled {
want = 2
}
if c != want {
t.Errorf("got %d race reports, want %d", c, want)
}
}
func TestRaceDuringParallelFailsAllSubtests(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
var ready sync.WaitGroup
ready.Add(2)
done := make(chan struct{})
go func() {
ready.Wait()
doRace() // This race happens while both subtests are running.
close(done)
}()
t.Run("sub", func(t *testing.T) {
t.Run("subsub1", func(t *testing.T) {
t.Parallel()
ready.Done()
<-done
})
t.Run("subsub2", func(t *testing.T) {
t.Parallel()
ready.Done()
<-done
})
})
return
}
out := runTest(t, "TestRaceDuringParallelFailsAllSubtests")
c := bytes.Count(out, []byte("race detected during execution of test"))
want := 0
// Each subtest should report the race independently.
if race.Enabled {
want = 2
}
if c != want {
t.Errorf("got %d race reports, want %d", c, want)
}
}
func TestRaceBeforeParallel(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
t.Run("sub", func(t *testing.T) {
doRace()
t.Parallel()
})
return
}
out := runTest(t, "TestRaceBeforeParallel")
c := bytes.Count(out, []byte("race detected during execution of test"))
want := 0
// We should see one race detector report.
if race.Enabled {
want = 1
}
if c != want {
t.Errorf("got %d race reports, want %d", c, want)
}
}
func TestRaceBeforeTests(t *testing.T) {
testenv.MustHaveExec(t)
exe, err := os.Executable()
if err != nil {
t.Skipf("can't find test executable: %v", err)
}
cmd := testenv.Command(t, exe, "-test.run=^$")
cmd = testenv.CleanCmdEnv(cmd)
cmd.Env = append(cmd.Env, "GO_WANT_RACE_BEFORE_TESTS=1")
out, _ := cmd.CombinedOutput()
t.Logf("%s", out)
c := bytes.Count(out, []byte("race detected outside of test execution"))
want := 0
if race.Enabled {
want = 1
}
if c != want {
t.Errorf("got %d race reports; want %d", c, want)
}
}
func TestBenchmarkRace(t *testing.T) {
out := runTest(t, "BenchmarkRacy")
c := bytes.Count(out, []byte("race detected during execution of test"))
want := 0
// We should see one race detector report.
if race.Enabled {
want = 1
}
if c != want {
t.Errorf("got %d race reports; want %d", c, want)
}
}
func BenchmarkRacy(b *testing.B) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
b.Skipf("skipping intentionally-racy benchmark")
}
for i := 0; i < b.N; i++ {
doRace()
}
}
func TestBenchmarkSubRace(t *testing.T) {
out := runTest(t, "BenchmarkSubRacy")
c := bytes.Count(out, []byte("race detected during execution of test"))
want := 0
// We should see two race detector reports:
// one in the sub-benchmark, and one in the parent afterward.
if race.Enabled {
want = 2
}
if c != want {
t.Errorf("got %d race reports; want %d", c, want)
}
}
func BenchmarkSubRacy(b *testing.B) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
b.Skipf("skipping intentionally-racy benchmark")
}
b.Run("non-racy", func(b *testing.B) {
tot := 0
for i := 0; i < b.N; i++ {
tot++
}
_ = tot
})
b.Run("racy", func(b *testing.B) {
for i := 0; i < b.N; i++ {
doRace()
}
})
doRace() // should be reported separately
}
func TestRunningTests(t *testing.T) {
t.Parallel()
// Regression test for https://go.dev/issue/64404:
// on timeout, the "running tests" message should not include
// tests that are waiting on parked subtests.
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
for i := 0; i < 2; i++ {
t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
t.Parallel()
for j := 0; j < 2; j++ {
t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
t.Parallel()
for {
time.Sleep(1 * time.Millisecond)
}
})
}
})
}
}
timeout := 10 * time.Millisecond
for {
cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String(), "-test.parallel=4")
cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
out, err := cmd.CombinedOutput()
t.Logf("%v:\n%s", cmd, out)
if _, ok := err.(*exec.ExitError); !ok {
t.Fatal(err)
}
// Because the outer subtests (and TestRunningTests itself) are marked as
// parallel, their test functions return (and are no longer “running”)
// before the inner subtests are released to run and hang.
// Only those inner subtests should be reported as running.
want := []string{
"TestRunningTests/outer0/inner0",
"TestRunningTests/outer0/inner1",
"TestRunningTests/outer1/inner0",
"TestRunningTests/outer1/inner1",
}
got, ok := parseRunningTests(out)
if slices.Equal(got, want) {
break
}
if ok {
t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
} else {
t.Logf("no running tests found")
}
t.Logf("retrying with longer timeout")
timeout *= 2
}
}
func TestRunningTestsInCleanup(t *testing.T) {
t.Parallel()
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
for i := 0; i < 2; i++ {
t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) {
// Not parallel: we expect to see only one outer test,
// stuck in cleanup after its subtest finishes.
t.Cleanup(func() {
for {
time.Sleep(1 * time.Millisecond)
}
})
for j := 0; j < 2; j++ {
t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) {
t.Parallel()
})
}
})
}
}
timeout := 10 * time.Millisecond
for {
cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String())
cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1")
out, err := cmd.CombinedOutput()
t.Logf("%v:\n%s", cmd, out)
if _, ok := err.(*exec.ExitError); !ok {
t.Fatal(err)
}
// TestRunningTestsInCleanup is blocked in the call to t.Run,
// but its test function has not yet returned so it should still
// be considered to be running.
// outer1 hasn't even started yet, so only outer0 and the top-level
// test function should be reported as running.
want := []string{
"TestRunningTestsInCleanup",
"TestRunningTestsInCleanup/outer0",
}
got, ok := parseRunningTests(out)
if slices.Equal(got, want) {
break
}
if ok {
t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n"))
} else {
t.Logf("no running tests found")
}
t.Logf("retrying with longer timeout")
timeout *= 2
}
}
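// parseRunningTests scans out for the "running tests:" section printed when a
// test binary times out. Each subsequent line is expected to start with two
// tabs (one added by package testing, one by the panic printer) followed by
// "<test name> (<elapsed>)"; scanning stops at the first line that does not
// have that shape. ok reports whether the "running tests:" header was found.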
func parseRunningTests(out []byte) (runningTests []string, ok bool) {
inRunningTests := false
for _, line := range strings.Split(string(out), "\n") {
if inRunningTests {
// Package testing adds one tab, the panic printer adds another.
if trimmed, ok := strings.CutPrefix(line, "\t\t"); ok {
if name, _, ok := strings.Cut(trimmed, " "); ok {
runningTests = append(runningTests, name)
continue
}
}
// This line is not the name of a running test.
return runningTests, true
}
if strings.TrimSpace(line) == "running tests:" {
inRunningTests = true
}
}
return nil, false
}
func TestConcurrentRun(t *testing.T) {
// Regression test for https://go.dev/issue/64402:
// this deadlocked after https://go.dev/cl/506755.
block := make(chan struct{})
var ready, done sync.WaitGroup
for i := 0; i < 2; i++ {
ready.Add(1)
done.Add(1)
go t.Run("", func(*testing.T) {
ready.Done()
<-block
done.Done()
})
}
ready.Wait()
close(block)
done.Wait()
}
func TestParentRun(t1 *testing.T) {
// Regression test for https://go.dev/issue/64402:
// this deadlocked after https://go.dev/cl/506755.
t1.Run("outer", func(t2 *testing.T) {
t2.Log("Hello outer!")
t1.Run("not_inner", func(t3 *testing.T) { // Note: this is t1.Run, not t2.Run.
t3.Log("Hello inner!")
})
})
}


@@ -0,0 +1,70 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows
package testing
import (
"errors"
"internal/syscall/windows"
"math/bits"
"syscall"
"time"
)
// isWindowsRetryable reports whether err is a Windows error code
// that may be fixed by retrying a failed filesystem operation.
func isWindowsRetryable(err error) bool {
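// Unwrap any wrapped errors (for example from an *fs.PathError) down to the
// innermost error before comparing it against known retryable error codes.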
for {
unwrapped := errors.Unwrap(err)
if unwrapped == nil {
break
}
err = unwrapped
}
if err == syscall.ERROR_ACCESS_DENIED {
return true // Observed in https://go.dev/issue/50051.
}
if err == windows.ERROR_SHARING_VIOLATION {
return true // Observed in https://go.dev/issue/51442.
}
return false
}
// highPrecisionTime represents a single point in time with query performance counter.
// time.Time on Windows has low system granularity, which is not suitable for
// measuring short time intervals.
//
// TODO: If Windows runtime implements high resolution timing then highPrecisionTime
// can be removed.
type highPrecisionTime struct {
now int64
}
// highPrecisionTimeNow returns high precision time for benchmarking.
func highPrecisionTimeNow() highPrecisionTime {
var t highPrecisionTime
// This should always succeed for Windows XP and above.
t.now = windows.QueryPerformanceCounter()
return t
}
func (a highPrecisionTime) sub(b highPrecisionTime) time.Duration {
delta := a.now - b.now
if queryPerformanceFrequency == 0 {
queryPerformanceFrequency = windows.QueryPerformanceFrequency()
}
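// Convert delta ticks to nanoseconds as delta * 1e9 / frequency, using a
// 128-bit intermediate product (bits.Mul64/bits.Div64) so that large deltas
// do not overflow before the division.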
hi, lo := bits.Mul64(uint64(delta), uint64(time.Second)/uint64(time.Nanosecond))
quo, _ := bits.Div64(hi, lo, uint64(queryPerformanceFrequency))
return time.Duration(quo)
}
var queryPerformanceFrequency int64
// highPrecisionTimeSince returns duration since a.
func highPrecisionTimeSince(a highPrecisionTime) time.Duration {
return highPrecisionTimeNow().sub(a)
}


@@ -0,0 +1,25 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testing_test
import (
"testing"
"time"
)
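// sink and sinkHPT are package-level so that the compiler cannot eliminate
// the benchmarked calls as dead stores.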
var sink time.Time
var sinkHPT testing.HighPrecisionTime
func BenchmarkTimeNow(b *testing.B) {
for i := 0; i < b.N; i++ {
sink = time.Now()
}
}
func BenchmarkHighPrecisionTimeNow(b *testing.B) {
for i := 0; i < b.N; i++ {
sinkHPT = testing.HighPrecisionTimeNow()
}
}