Update to go1.23.3

Commit a95773b5a1 by Vorapol Rinsatitnon, 2024-11-09 19:25:36 +11:00
Parent: 998d564602
25 changed files with 685 additions and 151 deletions


@@ -1,2 +1,2 @@
go1.23.2
time 2024-09-28T01:34:15Z
go1.23.3
time 2024-11-06T18:46:45Z


@@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int {
return int(sh.Size)
}
func addbuildinfo(val string) {
func addbuildinfo(ctxt *Link) {
val := *flagHostBuildid
if val == "gobuildid" {
buildID := *flagBuildid
if buildID == "" {
Exitf("-B gobuildid requires a Go build ID supplied via -buildid")
}
if ctxt.IsDarwin() {
buildinfo = uuidFromGoBuildId(buildID)
return
}
hashedBuildID := notsha256.Sum256([]byte(buildID))
buildinfo = hashedBuildID[:20]
@@ -821,11 +827,13 @@ func addbuildinfo(val string) {
if !strings.HasPrefix(val, "0x") {
Exitf("-B argument must start with 0x: %s", val)
}
ov := val
val = val[2:]
const maxLen = 32
maxLen := 32
if ctxt.IsDarwin() {
maxLen = 16
}
if hex.DecodedLen(len(val)) > maxLen {
Exitf("-B option too long (max %d digits): %s", maxLen, ov)
}
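
The hunk above moves -B handling from flag parsing into addbuildinfo(ctxt) so the length limit can depend on the target: 32 decoded bytes for an ELF note, 16 for a Mach-O UUID. A minimal sketch of that validation under those assumptions (the function name and error text are illustrative, not the linker's actual code):

    package main

    import (
        "encoding/hex"
        "fmt"
        "strings"
    )

    // checkHostBuildID mimics the checks in the hunk above.
    func checkHostBuildID(val string, darwin bool) error {
        if !strings.HasPrefix(val, "0x") {
            return fmt.Errorf("-B argument must start with 0x: %s", val)
        }
        maxLen := 32 // ELF note payload cap, in bytes
        if darwin {
            maxLen = 16 // a Mach-O LC_UUID holds exactly 16 bytes
        }
        if hex.DecodedLen(len(val)-2) > maxLen {
            return fmt.Errorf("-B option too long (max %d digits): %s", maxLen, val)
        }
        return nil
    }

    func main() {
        // 17 bytes of hex is fine for ELF but over the Darwin UUID cap.
        fmt.Println(checkHostBuildID("0x"+strings.Repeat("00", 17), true))
        fmt.Println(checkHostBuildID("0x"+strings.Repeat("00", 17), false))
    }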


@@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr {
return &machohdr
}
// Create a new Mach-O load command. ndata is the number of 32-bit words for
// the data (not including the load command header).
func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad {
if arch.PtrSize == 8 && (ndata&1 != 0) {
ndata++
@@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) {
}
}
if ctxt.IsInternal() && len(buildinfo) > 0 {
ml := newMachoLoad(ctxt.Arch, LC_UUID, 4)
// Mach-O UUID is 16 bytes
if len(buildinfo) < 16 {
buildinfo = append(buildinfo, make([]byte, 16)...)
}
// By default, buildinfo is already in UUIDv3 format
// (see uuidFromGoBuildId).
ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo)
ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:])
ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:])
ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:])
}
if ctxt.IsInternal() && ctxt.NeedCodeSign() {
ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2)
ml.data[0] = uint32(codesigOff)
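
With internal linking on Darwin, the -B payload now lands in an LC_UUID load command rather than a hashed note. A hedged sketch of reading it back with the standard debug/macho package (0x1b is LC_UUID from the Mach-O ABI; debug/macho defines no named constant for it, and the snippet assumes a little-endian image, which all current darwin targets are):

    package main

    import (
        "debug/macho"
        "encoding/binary"
        "fmt"
        "log"
        "os"
    )

    func main() {
        f, err := macho.Open(os.Args[1])
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        const lcUUID = 0x1b
        for _, l := range f.Loads {
            raw := l.Raw()
            // A load command starts with cmd and cmdsize, 4 bytes each;
            // the UUID payload is the 16 bytes that follow.
            if binary.LittleEndian.Uint32(raw) == lcUUID {
                fmt.Printf("UUID: %x\n", raw[8:24])
            }
        }
    }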


@@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte {
// to use this UUID flavor than any of the others. This is similar
// to how other linkers handle this (for example this code in lld:
// https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524).
rv[6] &= 0xcf
rv[6] &= 0x0f
rv[6] |= 0x30
rv[8] &= 0x3f
rv[8] |= 0xc0
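
The mask change above is the actual bug fix: 0xcf cleared only bits 4 and 5, so high bits of the hash could survive in the UUID's version nibble; 0x0f clears the whole nibble before version 3 is stamped. A standalone illustration (sha256 stands in for the linker's notsha256):

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        sum := sha256.Sum256([]byte("example build ID"))
        uuid := sum[:16]
        uuid[6] = uuid[6]&0x0f | 0x30 // clear the high nibble, then stamp version 3
        uuid[8] = uuid[8]&0x3f | 0xc0 // variant bits, mirroring what lld emits
        fmt.Printf("%x\n", uuid)
    }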


@@ -95,6 +95,7 @@ var (
flagN = flag.Bool("n", false, "no-op (deprecated)")
FlagS = flag.Bool("s", false, "disable symbol table")
flag8 bool // use 64-bit addresses in symbol table
flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID")
flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker")
flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references")
FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines")
@@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) {
flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`")
flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`")
flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible")
objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo)
objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) })
objabi.AddVersionFlag() // -V
objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
@@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) {
*flagBuildid = "go-openbsd"
}
if *flagHostBuildid != "" {
addbuildinfo(ctxt)
}
// enable benchmarking
var bench *benchmark.Metrics
if len(*benchmarkFlag) != 0 {


@@ -32,28 +32,46 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error,
if int64(n) > remain {
n = int(remain)
}
m := n
pos1 := pos
n, err = syscall.Sendfile(dst, src, &pos1, n)
if n > 0 {
pos += int64(n)
written += int64(n)
remain -= int64(n)
// (n, nil) indicates that sendfile(2) has transferred
// the exact number of bytes we requested, or some unretryable
// error has occurred with partial bytes sent. Either way, we
// don't need to go through the following logic to check EINTR
// or fall into dstFD.pd.waitWrite; just continue to send the
// next chunk or break the loop.
if n == m {
continue
} else if err != syscall.EAGAIN &&
err != syscall.EINTR &&
err != syscall.EBUSY {
// Particularly, EPIPE. Errors like that would normally lead
// the subsequent sendfile(2) call to (-1, EBADF).
break
}
} else if err != syscall.EAGAIN && err != syscall.EINTR {
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile), and other errors.
// We should end the loop when there is no error
// returned from sendfile(2) or it is not a retryable error.
break
}
if err == syscall.EINTR {
continue
}
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile), and other errors.
// We should end the loop when there is no error
// returned from sendfile(2) or it is not a retryable error.
if err != syscall.EAGAIN {
break
}
if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
return
}
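
This hunk (and the two smaller ones below for the other platforms) is reached through io.Copy: when the destination is a *net.TCPConn and the source is an *os.File, the runtime takes the sendfile(2) fast path patched here. A hedged usage sketch; the file name and address are placeholders:

    package main

    import (
        "io"
        "log"
        "net"
        "os"
    )

    func main() {
        f, err := os.Open("payload.bin")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        conn, err := net.Dial("tcp", "127.0.0.1:9000")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        // io.Copy finds the ReadFrom fast path on *net.TCPConn and hands
        // the descriptor pair to sendfile(2) where the platform supports it.
        if _, err := io.Copy(conn, f); err != nil {
            log.Fatal(err)
        }
    }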


@@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
return
}


@@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error,
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
return
}

src/os/copy_test.go (new file, +154 lines)

@@ -0,0 +1,154 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os_test
import (
"bytes"
"errors"
"io"
"math/rand/v2"
"net"
"os"
"runtime"
"sync"
"testing"
"golang.org/x/net/nettest"
)
// Exercise sendfile/splice fast paths with a moderately large file.
//
// https://go.dev/issue/70000
func TestLargeCopyViaNetwork(t *testing.T) {
const size = 10 * 1024 * 1024
dir := t.TempDir()
src, err := os.Create(dir + "/src")
if err != nil {
t.Fatal(err)
}
defer src.Close()
if _, err := io.CopyN(src, newRandReader(), size); err != nil {
t.Fatal(err)
}
if _, err := src.Seek(0, 0); err != nil {
t.Fatal(err)
}
dst, err := os.Create(dir + "/dst")
if err != nil {
t.Fatal(err)
}
defer dst.Close()
client, server := createSocketPair(t, "tcp")
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
if n, err := io.Copy(dst, server); n != size || err != nil {
t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size)
}
}()
go func() {
defer wg.Done()
defer client.Close()
if n, err := io.Copy(client, src); n != size || err != nil {
t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size)
}
}()
wg.Wait()
if _, err := dst.Seek(0, 0); err != nil {
t.Fatal(err)
}
if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil {
t.Fatal(err)
}
}
func compareReaders(a, b io.Reader) error {
bufa := make([]byte, 4096)
bufb := make([]byte, 4096)
for {
na, erra := io.ReadFull(a, bufa)
if erra != nil && erra != io.EOF {
return erra
}
nb, errb := io.ReadFull(b, bufb)
if errb != nil && errb != io.EOF {
return errb
}
if !bytes.Equal(bufa[:na], bufb[:nb]) {
return errors.New("contents mismatch")
}
if erra == io.EOF && errb == io.EOF {
break
}
}
return nil
}
type randReader struct {
rand *rand.Rand
}
func newRandReader() *randReader {
return &randReader{rand.New(rand.NewPCG(0, 0))}
}
func (r *randReader) Read(p []byte) (int, error) {
var v uint64
var n int
for i := range p {
if n == 0 {
v = r.rand.Uint64()
n = 8
}
p[i] = byte(v & 0xff)
v >>= 8
n--
}
return len(p), nil
}
func createSocketPair(t *testing.T, proto string) (client, server net.Conn) {
t.Helper()
if !nettest.TestableNetwork(proto) {
t.Skipf("%s does not support %q", runtime.GOOS, proto)
}
ln, err := nettest.NewLocalListener(proto)
if err != nil {
t.Fatalf("NewLocalListener error: %v", err)
}
t.Cleanup(func() {
if ln != nil {
ln.Close()
}
if client != nil {
client.Close()
}
if server != nil {
server.Close()
}
})
ch := make(chan struct{})
go func() {
var err error
server, err = ln.Accept()
if err != nil {
t.Errorf("Accept new connection error: %v", err)
}
ch <- struct{}{}
}()
client, err = net.Dial(proto, ln.Addr().String())
<-ch
if err != nil {
t.Fatalf("Dial new connection error: %v", err)
}
return client, server
}


@@ -8,12 +8,17 @@
// v5.3: pidfd_open syscall, clone3 syscall;
// v5.4: P_PIDFD idtype support for waitid syscall;
// v5.6: pidfd_getfd syscall.
//
// N.B. Alternative Linux implementations may not follow this ordering. e.g.,
// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until
// 8.0.
package os
import (
"errors"
"internal/syscall/unix"
"runtime"
"sync"
"syscall"
"unsafe"
@@ -139,14 +144,21 @@ func pidfdWorks() bool {
var checkPidfdOnce = sync.OnceValue(checkPidfd)
// checkPidfd checks whether all required pidfd-related syscalls work.
// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid
// syscall with idtype of P_PIDFD.
// checkPidfd checks whether all required pidfd-related syscalls work. This
// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with
// idtype of P_PIDFD, and clone(CLONE_PIDFD).
//
// Reasons for non-working pidfd syscalls include an older kernel and an
// execution environment in which the above system calls are restricted by
// seccomp or a similar technology.
func checkPidfd() error {
// In Android version < 12, pidfd-related system calls are not allowed
// by seccomp and trigger the SIGSYS signal. See issue #69065.
if runtime.GOOS == "android" {
ignoreSIGSYS()
defer restoreSIGSYS()
}
// Get a pidfd of the current process (opening of "/proc/self" won't
// work for waitid).
fd, err := unix.PidFDOpen(syscall.Getpid(), 0)
@@ -172,5 +184,27 @@ func checkPidfd() error {
return NewSyscallError("pidfd_send_signal", err)
}
// Verify that clone(CLONE_PIDFD) works.
//
// This shouldn't be necessary since pidfd_open was added in Linux 5.3,
// after CLONE_PIDFD in Linux 5.2, but some alternative Linux
// implementations may not adhere to this ordering.
if err := checkClonePidfd(); err != nil {
return err
}
return nil
}
// Provided by syscall.
//
//go:linkname checkClonePidfd
func checkClonePidfd() error
// Provided by runtime.
//
//go:linkname ignoreSIGSYS
func ignoreSIGSYS()
//go:linkname restoreSIGSYS
func restoreSIGSYS()
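
None of this changes the os package API; pidfd use stays internal. A hedged sketch of the call path that benefits, assuming a Linux host with /bin/true: when the probes above succeed, Wait uses waitid(P_PIDFD) under the hood, otherwise it falls back to the pid-based path, and callers cannot tell the difference.

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        p, err := os.StartProcess("/bin/true", []string{"true"}, &os.ProcAttr{})
        if err != nil {
            log.Fatal(err)
        }
        state, err := p.Wait() // pidfd-backed when checkPidfd reports support
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("exited:", state.ExitCode())
    }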


@@ -14,15 +14,12 @@ import (
"net"
. "os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"testing"
"time"
"golang.org/x/net/nettest"
)
func TestCopyFileRange(t *testing.T) {
@@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) {
t.Fatalf("server Control error: %v", err)
}
}
func createSocketPair(t *testing.T, proto string) (client, server net.Conn) {
t.Helper()
if !nettest.TestableNetwork(proto) {
t.Skipf("%s does not support %q", runtime.GOOS, proto)
}
ln, err := nettest.NewLocalListener(proto)
if err != nil {
t.Fatalf("NewLocalListener error: %v", err)
}
t.Cleanup(func() {
if ln != nil {
ln.Close()
}
if client != nil {
client.Close()
}
if server != nil {
server.Close()
}
})
ch := make(chan struct{})
go func() {
var err error
server, err = ln.Accept()
if err != nil {
t.Errorf("Accept new connection error: %v", err)
}
ch <- struct{}{}
}()
client, err = net.Dial(proto, ln.Addr().String())
<-ch
if err != nil {
t.Fatalf("Dial new connection error: %v", err)
}
return client, server
}


@@ -208,6 +208,18 @@ func coroswitch_m(gp *g) {
// directly if possible.
setGNoWB(&mp.curg, gnext)
setMNoWB(&gnext.m, mp)
// Synchronize with any outstanding goroutine profile. We're about to start
// executing, and an invariant of the profiler is that we tryRecordGoroutineProfile
// whenever a goroutine is about to start running.
//
// N.B. We must do this before transitioning to _Grunning but after installing gnext
// in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile
// may allocate).
if goroutineProfile.active {
tryRecordGoroutineProfile(gnext, nil, osyield)
}
if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
// The CAS failed: use casgstatus, which will take care of
// coordinating with the garbage collector about the state change.


@@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) {
for i := range p {
cf := CallersFrames(p[i].Stack())
j := 0
for ; j < len(expandedStack); j++ {
for j < len(expandedStack) {
f, more := cf.Next()
// f.PC is a "call PC", but later consumers will expect
// "return PCs"
expandedStack[j] = f.PC + 1
j++
if !more {
break
}
@@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok
// of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
copy(p[0].Stack0[:], r.Stack)
i := copy(p[0].Stack0[:], r.Stack)
clear(p[0].Stack0[i:])
p = p[1:]
})
}
@@ -1649,7 +1651,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
return
}
for i, mr := range records[0:n] {
copy(p[i].Stack0[:], mr.Stack)
l := copy(p[i].Stack0[:], mr.Stack)
clear(p[i].Stack0[l:])
}
return
}
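
The clear calls matter because the fixed-size Stack0 arrays are conventionally scanned up to the first zero PC; without null padding, a shallow record written over a deeper one would appear to keep the old record's tail frames. A sketch of that consumer-side convention (it mirrors the standard StackRecord.Stack method):

    package main

    import (
        "fmt"
        "runtime"
    )

    // stackPCs returns the PCs of r up to the first zero entry, which is
    // exactly the convention the null padding above preserves.
    func stackPCs(r *runtime.StackRecord) []uintptr {
        for i, pc := range r.Stack0 {
            if pc == 0 {
                return r.Stack0[:i]
            }
        }
        return r.Stack0[:]
    }

    func main() {
        var r runtime.StackRecord
        r.Stack0[0] = 1 // pretend PC; the rest stays zeroed
        fmt.Println(len(stackPCs(&r))) // 1
    }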


@@ -879,8 +879,9 @@ func runPerThreadSyscall() {
}
const (
_SI_USER = 0
_SI_TKILL = -6
_SI_USER = 0
_SI_TKILL = -6
_SYS_SECCOMP = 1
)
// sigFromUser reports whether the signal was sent because of a call
@@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool {
return code == _SI_USER || code == _SI_TKILL
}
// sigFromSeccomp reports whether the signal was sent from seccomp.
//
//go:nosplit
func (c *sigctxt) sigFromSeccomp() bool {
code := int32(c.sigcode())
return code == _SYS_SECCOMP
}
//go:nosplit
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) {
r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)


@@ -13,3 +13,10 @@ package runtime
func (c *sigctxt) sigFromUser() bool {
return c.sigcode() == _SI_USER
}
// sigFromSeccomp reports whether the signal was sent from seccomp.
//
//go:nosplit
func (c *sigctxt) sigFromSeccomp() bool {
return false
}


@@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) {
}
t.Logf("Profile = %v", p)
stks := stacks(p)
stks := profileStacks(p)
for _, test := range tests {
if !containsStack(stks, test.stk) {
t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p)


@@ -15,6 +15,7 @@ import (
"internal/syscall/unix"
"internal/testenv"
"io"
"iter"
"math"
"math/big"
"os"
@@ -981,7 +982,7 @@ func TestBlockProfile(t *testing.T) {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
stks := profileStacks(p)
for _, test := range tests {
if !containsStack(stks, test.stk) {
t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
@@ -991,7 +992,7 @@ func TestBlockProfile(t *testing.T) {
}
func stacks(p *profile.Profile) (res [][]string) {
func profileStacks(p *profile.Profile) (res [][]string) {
for _, s := range p.Sample {
var stk []string
for _, l := range s.Location {
@@ -1004,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) {
return res
}
func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
for _, record := range records {
frames := runtime.CallersFrames(record.Stack())
var stk []string
for {
frame, more := frames.Next()
stk = append(stk, frame.Function)
if !more {
break
}
}
res = append(res, stk)
}
return res
}
func containsStack(got [][]string, want []string) bool {
for _, stk := range got {
if len(stk) < len(want) {
@@ -1288,7 +1305,7 @@ func TestMutexProfile(t *testing.T) {
t.Fatalf("invalid profile: %v", err)
}
stks := stacks(p)
stks := profileStacks(p)
for _, want := range [][]string{
{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
} {
@@ -1328,6 +1345,28 @@ func TestMutexProfile(t *testing.T) {
t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
}
})
t.Run("records", func(t *testing.T) {
// Record a mutex profile using the structured record API.
var records []runtime.BlockProfileRecord
for {
n, ok := runtime.MutexProfile(records)
if ok {
records = records[:n]
break
}
records = make([]runtime.BlockProfileRecord, n*2)
}
// Check that we see the same stack trace as the proto profile. For
// historical reason we expect a runtime.goexit root frame here that is
// omitted in the proto profile.
stks := blockRecordStacks(records)
want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
if !containsStack(stks, want) {
t.Errorf("No matching stack entry for %+v", want)
}
})
}
func TestMutexProfileRateAdjust(t *testing.T) {
@@ -1754,6 +1793,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) {
}
}
// Regression test for #69998.
func TestGoroutineProfileCoro(t *testing.T) {
testenv.MustHaveParallelism(t)
goroutineProf := Lookup("goroutine")
// Set up a goroutine to just create and run coroutine goroutines all day.
iterFunc := func() {
p, stop := iter.Pull2(
func(yield func(int, int) bool) {
for i := 0; i < 10000; i++ {
if !yield(i, i) {
return
}
}
},
)
defer stop()
for {
_, _, ok := p()
if !ok {
break
}
}
}
var wg sync.WaitGroup
done := make(chan struct{})
wg.Add(1)
go func() {
defer wg.Done()
for {
iterFunc()
select {
case <-done:
default:
}
}
}()
// Take a goroutine profile. If the bug in #69998 is present, this will crash
// with high probability. We don't care about the output for this bug.
goroutineProf.WriteTo(io.Discard, 1)
}
func BenchmarkGoroutine(b *testing.B) {
withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
return func(b *testing.B) {
@@ -2441,16 +2524,7 @@ func TestTimeVDSO(t *testing.T) {
}
func TestProfilerStackDepth(t *testing.T) {
// Disable sampling, otherwise it's difficult to assert anything.
oldMemRate := runtime.MemProfileRate
runtime.MemProfileRate = 1
runtime.SetBlockProfileRate(1)
oldMutexRate := runtime.SetMutexProfileFraction(1)
t.Cleanup(func() {
runtime.MemProfileRate = oldMemRate
runtime.SetBlockProfileRate(0)
runtime.SetMutexProfileFraction(oldMutexRate)
})
t.Cleanup(disableSampling())
const depth = 128
go produceProfileEvents(t, depth)
@@ -2478,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) {
}
t.Logf("Profile = %v", p)
stks := stacks(p)
stks := profileStacks(p)
var stk []string
for _, s := range stks {
if hasPrefix(s, test.prefix) {
@@ -2742,3 +2816,84 @@ runtime/pprof.inlineA`,
})
}
}
func TestProfileRecordNullPadding(t *testing.T) {
// Produce events for the different profile types.
t.Cleanup(disableSampling())
memSink = make([]byte, 1) // MemProfile
<-time.After(time.Millisecond) // BlockProfile
blockMutex(t) // MutexProfile
runtime.GC()
// Test that all profile records are null padded.
testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
return runtime.MemProfile(p, true)
})
testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
return runtime.MemProfile(p, false)
})
// Not testing ThreadCreateProfile because it is broken, see issue 6104.
}
func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
stack0 := func(sr *T) *[32]uintptr {
switch t := any(sr).(type) {
case *runtime.StackRecord:
return &t.Stack0
case *runtime.MemProfileRecord:
return &t.Stack0
case *runtime.BlockProfileRecord:
return &t.Stack0
default:
panic(fmt.Sprintf("unexpected type %T", sr))
}
}
t.Run(name, func(t *testing.T) {
var p []T
for {
n, ok := fn(p)
if ok {
p = p[:n]
break
}
p = make([]T, n*2)
for i := range p {
s0 := stack0(&p[i])
for j := range s0 {
// Poison the Stack0 array to identify lack of zero padding
s0[j] = ^uintptr(0)
}
}
}
if len(p) == 0 {
t.Fatal("no records found")
}
for _, sr := range p {
for i, v := range stack0(&sr) {
if v == ^uintptr(0) {
t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
}
}
}
})
}
// disableSampling configures the profilers to capture all events, otherwise
// it's difficult to assert anything.
func disableSampling() func() {
oldMemRate := runtime.MemProfileRate
runtime.MemProfileRate = 1
runtime.SetBlockProfileRate(1)
oldMutexRate := runtime.SetMutexProfileFraction(1)
return func() {
runtime.MemProfileRate = oldMemRate
runtime.SetBlockProfileRate(0)
runtime.SetMutexProfileFraction(oldMutexRate)
}
}


@@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) {
// Check that the backtrace matches the source code.
types := []string{
"[]main.astruct;",
"bucket<string,main.astruct>;",
"hash<string,main.astruct>;",
"main.astruct;",
"hash<string,main.astruct> * map[string]main.astruct;",
"[]main.astruct",
"bucket<string,main.astruct>",
"hash<string,main.astruct>",
"main.astruct",
"hash<string,main.astruct> * map[string]main.astruct",
}
for _, name := range types {
if !strings.Contains(sgot, name) {
t.Fatalf("could not find %s in 'info typrs astruct' output", name)
t.Fatalf("could not find %q in 'info typrs astruct' output", name)
}
}
}


@@ -605,6 +605,19 @@ var crashing atomic.Int32
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
var testSigusr1 func(gp *g) bool
// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
var sigsysIgnored uint32
//go:linkname ignoreSIGSYS os.ignoreSIGSYS
func ignoreSIGSYS() {
atomic.Store(&sigsysIgnored, 1)
}
//go:linkname restoreSIGSYS os.restoreSIGSYS
func restoreSIGSYS() {
atomic.Store(&sigsysIgnored, 0)
}
// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
// signal stack. The parameter gp will be the value of the global g
@@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
return
}
if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
return
}
if flags&_SigKill != 0 {
dieFromSignal(sig)
}


@@ -467,43 +467,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint
//go:linkname syscall_Syscall syscall.Syscall
//go:nosplit
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3)
}
//go:linkname syscall_Syscall6 syscall.Syscall6
//go:nosplit
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3, a4, a5, a6}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6)
}
//go:linkname syscall_Syscall9 syscall.Syscall9
//go:nosplit
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9)
}
//go:linkname syscall_Syscall12 syscall.Syscall12
//go:nosplit
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
}
//go:linkname syscall_Syscall15 syscall.Syscall15
//go:nosplit
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
}
//go:linkname syscall_Syscall18 syscall.Syscall18
//go:nosplit
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18}
return syscall_SyscallN(fn, args[:nargs]...)
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
}
// maxArgs should be divisible by 2, as Windows stack
@@ -516,7 +510,15 @@ const maxArgs = 42
//go:linkname syscall_SyscallN syscall.SyscallN
//go:nosplit
func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
if len(args) > maxArgs {
return syscall_syscalln(fn, uintptr(len(args)), args...)
}
//go:nosplit
func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
if n > uintptr(len(args)) {
panic("syscall: n > len(args)") // should not be reachable from user code
}
if n > maxArgs {
panic("runtime: SyscallN has too many arguments")
}
@@ -525,7 +527,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
// calls back into Go.
c := &getg().m.winsyscall
c.fn = fn
c.n = uintptr(len(args))
c.n = n
if c.n != 0 {
c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
}
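
The rewrite routes the fixed-arity Syscall helpers through syscall_syscalln without materializing a maximum-size argument array in every nosplit frame, which is what overflowed the stack in issue 69813 (see the test below). For callers, syscall.SyscallN remains the public entry point; a hedged Windows usage sketch:

    //go:build windows

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        kernel32 := syscall.NewLazyDLL("kernel32.dll")
        getTickCount := kernel32.NewProc("GetTickCount")
        // SyscallN forwards to the same syscall_syscalln shown above.
        ticks, _, _ := syscall.SyscallN(getTickCount.Addr())
        fmt.Println("ms since boot:", uint32(ticks))
    }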


@@ -1215,6 +1215,13 @@ func TestBigStackCallbackSyscall(t *testing.T) {
}
}
func TestSyscallStackUsage(t *testing.T) {
// Test that the stack usage of a syscall doesn't exceed the limit.
// See https://go.dev/issue/69813.
syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
}
// wantLoadLibraryEx reports whether we expect LoadLibraryEx to work for tests.
func wantLoadLibraryEx() bool {
return testenv.Builder() != "" && (runtime.GOARCH == "amd64" || runtime.GOARCH == "386")


@@ -30,35 +30,6 @@ type timer struct {
state uint8 // state bits
isChan bool // timer has a channel; immutable; can be read without lock
// isSending is used to handle races between running a
// channel timer and stopping or resetting the timer.
// It is used only for channel timers (t.isChan == true).
// The lowest zero bit is set when about to send a value on the channel,
// and cleared after sending the value.
// The stop/reset code uses this to detect whether it
// stopped the channel send.
//
// An isSending bit is set only when t.mu is held.
// An isSending bit is cleared only when t.sendLock is held.
// isSending is read only when both t.mu and t.sendLock are held.
//
// Setting and clearing Uint8 bits handles the case of
// a timer that is reset concurrently with unlockAndRun.
// If the reset timer runs immediately, we can wind up with
// concurrent calls to unlockAndRun for the same timer.
// Using matched bit set and clear in unlockAndRun
// ensures that the value doesn't get temporarily out of sync.
//
// We use a uint8 to keep the timer struct small.
// This means that we can only support up to 8 concurrent
// runs of a timer, where a concurrent run can only occur if
// we start a run, unlock the timer, the timer is reset to a new
// value (or the ticker fires again), it is ready to run,
// and it is actually run, all before the first run completes.
// Since completing a run is fast, even 2 concurrent timer runs are
// nearly impossible, so this should be safe in practice.
isSending atomic.Uint8
blocked uint32 // number of goroutines blocked on timer's channel
// Timer wakes up at when, and then at when+period, ... (period > 0 only)
@@ -98,6 +69,20 @@ type timer struct {
// sendLock protects sends on the timer's channel.
// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
sendLock mutex
// isSending is used to handle races between running a
// channel timer and stopping or resetting the timer.
// It is used only for channel timers (t.isChan == true).
// It is not used for tickers.
// The value is incremented when about to send a value on the channel,
// and decremented after sending the value.
// The stop/reset code uses this to detect whether it
// stopped the channel send.
//
// isSending is incremented only when t.mu is held.
// isSending is decremented only when t.sendLock is held.
// isSending is read only when both t.mu and t.sendLock are held.
isSending atomic.Int32
}
// init initializes a newly allocated timer t.
@@ -467,7 +452,7 @@ func (t *timer) stop() bool {
// send from actually happening. That means
// that we should return true: the timer was
// stopped, even though t.when may be zero.
if t.isSending.Load() > 0 {
if t.period == 0 && t.isSending.Load() > 0 {
pending = true
}
}
@@ -529,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
t.maybeRunAsync()
}
t.trace("modify")
oldPeriod := t.period
t.period = period
if f != nil {
t.f = f
@@ -570,7 +556,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
// send from actually happening. That means
// that we should return true: the timer was
// stopped, even though t.when may be zero.
if t.isSending.Load() > 0 {
if oldPeriod == 0 && t.isSending.Load() > 0 {
pending = true
}
}
@@ -1063,20 +1049,11 @@ func (t *timer) unlockAndRun(now int64) {
}
async := debug.asynctimerchan.Load() != 0
var isSendingClear uint8
if !async && t.isChan {
if !async && t.isChan && t.period == 0 {
// Tell Stop/Reset that we are sending a value.
// Set the lowest zero bit.
// We do this awkward step because atomic.Uint8
// doesn't support Add or CompareAndSwap.
// We only set bits with t locked.
v := t.isSending.Load()
i := sys.TrailingZeros8(^v)
if i == 8 {
if t.isSending.Add(1) < 0 {
throw("too many concurrent timer firings")
}
isSendingClear = 1 << i
t.isSending.Or(isSendingClear)
}
t.unlock()
@@ -1114,6 +1091,16 @@ func (t *timer) unlockAndRun(now int64) {
// started to send the value. That lets them correctly return
// true meaning that no value was sent.
lock(&t.sendLock)
if t.period == 0 {
// We are committed to possibly sending a value
// based on seq, so no need to keep telling
// stop/modify that we are sending.
if t.isSending.Add(-1) < 0 {
throw("mismatched isSending updates")
}
}
if t.seq != seq {
f = func(any, uintptr, int64) {}
}
@@ -1122,9 +1109,6 @@ func (t *timer) unlockAndRun(now int64) {
f(arg, seq, delay)
if !async && t.isChan {
// We are no longer sending a value.
t.isSending.And(^isSendingClear)
unlock(&t.sendLock)
}
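
The period == 0 guards confine the isSending count to one-shot timers: Ticker.Stop has no return value, so only Timer.Stop and Timer.Reset need to know whether a channel send was already committed. A hedged illustration of the caller-visible contract:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.NewTimer(50 * time.Millisecond)
        // Stop reports whether it prevented the timer from firing; the
        // isSending count is how the runtime detects a send that is
        // already committed, so Stop can correctly return false then.
        if t.Stop() {
            fmt.Println("stopped before the value was sent")
        } else {
            fmt.Println("too late: the timer already fired")
        }
    }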


@@ -7,6 +7,7 @@
package syscall
import (
errpkg "errors"
"internal/itoa"
"runtime"
"unsafe"
@@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att
if clone3 != nil {
pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0)
} else {
// N.B. Keep in sync with doCheckClonePidfd.
flags |= uintptr(SIGCHLD)
if runtime.GOARCH == "s390x" {
// On Linux/s390, the first two arguments of clone(2) are swapped.
@@ -743,3 +745,82 @@ func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) {
*sys.PidFD = -1
}
}
// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a
// clone.
//
//go:linkname os_checkClonePidfd os.checkClonePidfd
func os_checkClonePidfd() error {
pidfd := int32(-1)
pid, errno := doCheckClonePidfd(&pidfd)
if errno != 0 {
return errno
}
if pidfd == -1 {
// Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process
// before returning.
var err error
for {
var status WaitStatus
_, err = Wait4(int(pid), &status, 0, nil)
if err != EINTR {
break
}
}
if err != nil {
return err
}
return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd")
}
// Good: CLONE_PIDFD provided a pidfd. Reap the process and close the
// pidfd.
defer Close(int(pidfd))
for {
const _P_PIDFD = 3
_, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0)
if errno != EINTR {
break
}
}
if errno != 0 {
return errno
}
return nil
}
// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and
// child execution. This is a separate function so we can separate the child's
// and parent's stack frames if we're using vfork.
//
// This is go:noinline because the point is to keep the stack frames of this
// and os_checkClonePidfd separate.
//
//go:noinline
func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) {
flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD)
if runtime.GOARCH == "s390x" {
// On Linux/s390, the first two arguments of clone(2) are swapped.
pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd)))
} else {
pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd)))
}
if errno != 0 || pid != 0 {
// If we're in the parent, we must return immediately
// so we're not in the same stack frame as the child.
// This can at most use the return PC, which the child
// will not modify, and the results of
// rawVforkSyscall, which must have been written after
// the child was replaced.
return
}
for {
RawSyscall(SYS_EXIT_GROUP, 0, 0, 0)
}
}


@@ -847,6 +847,57 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
wg.Wait()
}
// Test having a large number of goroutines wake up a ticker simultaneously.
// This used to trigger a crash when run under x/tools/cmd/stress.
func TestMultiWakeupTicker(t *testing.T) {
if testing.Short() {
t.Skip("-short")
}
goroutines := runtime.GOMAXPROCS(0)
timer := NewTicker(Microsecond)
var wg sync.WaitGroup
wg.Add(goroutines)
for range goroutines {
go func() {
defer wg.Done()
for range 100000 {
select {
case <-timer.C:
case <-After(Millisecond):
}
}
}()
}
wg.Wait()
}
// Test having a large number of goroutines wake up a timer simultaneously.
// This used to trigger a crash when run under x/tools/cmd/stress.
func TestMultiWakeupTimer(t *testing.T) {
if testing.Short() {
t.Skip("-short")
}
goroutines := runtime.GOMAXPROCS(0)
timer := NewTimer(Nanosecond)
var wg sync.WaitGroup
wg.Add(goroutines)
for range goroutines {
go func() {
defer wg.Done()
for range 10000 {
select {
case <-timer.C:
default:
}
timer.Reset(Nanosecond)
}
}()
}
wg.Wait()
}
// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860


@@ -12,22 +12,29 @@ import (
"bytes"
"log"
"os/exec"
"runtime"
"strings"
)
func main() {
checkLinkOutput("", "-B argument must start with 0x")
// The "cannot open file" error indicates that parsing of the -B flag
// succeeded and the link failed at a later step.
checkLinkOutput("0", "-B argument must start with 0x")
checkLinkOutput("0x", "usage")
checkLinkOutput("0x", "cannot open file nonexistent.o")
checkLinkOutput("0x0", "-B argument must have even number of digits")
checkLinkOutput("0x00", "usage")
checkLinkOutput("0x00", "cannot open file nonexistent.o")
checkLinkOutput("0xYZ", "-B argument contains invalid hex digit")
checkLinkOutput("0x"+strings.Repeat("00", 32), "usage")
checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)")
maxLen := 32
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
maxLen = 16
}
checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o")
checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long")
}
func checkLinkOutput(buildid string, message string) {
cmd := exec.Command("go", "tool", "link", "-B", buildid)
cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o")
out, err := cmd.CombinedOutput()
if err == nil {
log.Fatalf("expected cmd/link to fail")
@@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) {
}
if !strings.Contains(firstLine, message) {
log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine)
log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine)
}
}