Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | f9b11597fa |  |
|  | 289b9e3aad |  |
|  | 51ae6d6612 |  |
|  | a95773b5a1 |  |
|  | 998d564602 |  |
|  | a50237a54f |  |
VERSION (4 changed lines)
@@ -1,2 +1,2 @@
-go1.23.2
-time 2024-09-28T01:34:15Z
+go1.23.4
+time 2024-11-27T20:27:20Z
patches/0001-Switch-ProcessPrng-back-to-RtlGenRandom.patch (new file, 159 lines)
@@ -0,0 +1,159 @@
From f1f146d4534fc925bceffe523852fe2261841008 Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sat, 21 Sep 2024 23:56:11 +1000
Subject: [PATCH] Switch ProcessPrng back to RtlGenRandom (revert 693def1)

---
 src/crypto/rand/rand.go | 2 +-
 src/crypto/rand/rand_windows.go | 7 +++--
 .../syscall/windows/syscall_windows.go | 2 +-
 .../syscall/windows/zsyscall_windows.go | 7 ++---
 src/runtime/os_windows.go | 30 ++++++++++++-------
 5 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/src/crypto/rand/rand.go b/src/crypto/rand/rand.go
index d16d7a1..cdfeb06 100644
--- a/src/crypto/rand/rand.go
+++ b/src/crypto/rand/rand.go
@@ -16,7 +16,7 @@ import "io"
// - On macOS and iOS, Reader uses arc4random_buf(3).
// - On OpenBSD and NetBSD, Reader uses getentropy(2).
// - On other Unix-like systems, Reader reads from /dev/urandom.
-// - On Windows, Reader uses the ProcessPrng API.
+// - On Windows, Reader uses the RtlGenRandom API.
// - On js/wasm, Reader uses the Web Crypto API.
// - On wasip1/wasm, Reader uses random_get from wasi_snapshot_preview1.
var Reader io.Reader
diff --git a/src/crypto/rand/rand_windows.go b/src/crypto/rand/rand_windows.go
index 7380f1f..6c0655c 100644
--- a/src/crypto/rand/rand_windows.go
+++ b/src/crypto/rand/rand_windows.go
@@ -15,8 +15,11 @@ func init() { Reader = &rngReader{} }

type rngReader struct{}

-func (r *rngReader) Read(b []byte) (int, error) {
- if err := windows.ProcessPrng(b); err != nil {
+func (r *rngReader) Read(b []byte) (n int, err error) {
+ // RtlGenRandom only returns 1<<32-1 bytes at a time. We only read at
+ // most 1<<31-1 bytes at a time so that this works the same on 32-bit
+ // and 64-bit systems.
+ if err := batched(windows.RtlGenRandom, 1<<31-1)(b); err != nil {
return 0, err
}
return len(b), nil
diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go
index cc26a50..b0b5a64 100644
--- a/src/internal/syscall/windows/syscall_windows.go
+++ b/src/internal/syscall/windows/syscall_windows.go
@@ -414,7 +414,7 @@ func ErrorLoadingGetTempPath2() error {
//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle syscall.Handle, err error) = kernel32.CreateEventW

-//sys ProcessPrng(buf []byte) (err error) = bcryptprimitives.ProcessPrng
+//sys RtlGenRandom(buf []byte) (err error) = advapi32.SystemFunction036

type FILE_ID_BOTH_DIR_INFO struct {
NextEntryOffset uint32
diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go
index 414ad26..062641c 100644
--- a/src/internal/syscall/windows/zsyscall_windows.go
+++ b/src/internal/syscall/windows/zsyscall_windows.go
@@ -38,7 +38,6 @@ func errnoErr(e syscall.Errno) error {

var (
modadvapi32 = syscall.NewLazyDLL(sysdll.Add("advapi32.dll"))
- modbcryptprimitives = syscall.NewLazyDLL(sysdll.Add("bcryptprimitives.dll"))
modiphlpapi = syscall.NewLazyDLL(sysdll.Add("iphlpapi.dll"))
modkernel32 = syscall.NewLazyDLL(sysdll.Add("kernel32.dll"))
modnetapi32 = syscall.NewLazyDLL(sysdll.Add("netapi32.dll"))
@@ -57,7 +56,7 @@ var (
procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
- procProcessPrng = modbcryptprimitives.NewProc("ProcessPrng")
+ procSystemFunction036 = modadvapi32.NewProc("SystemFunction036")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procCreateEventW = modkernel32.NewProc("CreateEventW")
procGetACP = modkernel32.NewProc("GetACP")
@@ -183,12 +182,12 @@ func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32
return
}

-func ProcessPrng(buf []byte) (err error) {
+func RtlGenRandom(buf []byte) (err error) {
var _p0 *byte
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall(procProcessPrng.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0)
+ r1, _, e1 := syscall.Syscall(procSystemFunction036.Addr(), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0)
if r1 == 0 {
err = errnoErr(e1)
}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 4aabc29..0273580 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -127,8 +127,15 @@ var (
_WriteFile,
_ stdFunction

- // Use ProcessPrng to generate cryptographically random data.
- _ProcessPrng stdFunction
+ // Use RtlGenRandom to generate cryptographically random data.
+ // This approach has been recommended by Microsoft (see issue
+ // 15589 for details).
+ // The RtlGenRandom is not listed in advapi32.dll, instead
+ // RtlGenRandom function can be found by searching for SystemFunction036.
+ // Also some versions of Mingw cannot link to SystemFunction036
+ // when building executable as Cgo. So load SystemFunction036
+ // manually during runtime startup.
+ _RtlGenRandom stdFunction

// Load ntdll.dll manually during startup, otherwise Mingw
// links wrong printf function to cgo executable (see issue
@@ -146,10 +153,11 @@ var (
)

var (
- bcryptprimitivesdll = [...]uint16{'b', 'c', 'r', 'y', 'p', 't', 'p', 'r', 'i', 'm', 'i', 't', 'i', 'v', 'e', 's', '.', 'd', 'l', 'l', 0}
- ntdlldll = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0}
- powrprofdll = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0}
- winmmdll = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0}
+ advapi32dll = [...]uint16{'a', 'd', 'v', 'a', 'p', 'i', '3', '2', '.', 'd', 'l', 'l', 0}
+ ntdlldll = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0}
+ powrprofdll = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0}
+ winmmdll = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0}
+ ws2_32dll = [...]uint16{'w', 's', '2', '_', '3', '2', '.', 'd', 'l', 'l', 0}
)

// Function to be called by windows CreateThread
@@ -263,11 +271,11 @@ func windows_QueryPerformanceFrequency() int64 {
}

func loadOptionalSyscalls() {
- bcryptPrimitives := windowsLoadSystemLib(bcryptprimitivesdll[:])
- if bcryptPrimitives == 0 {
- throw("bcryptprimitives.dll not found")
+ a32 := windowsLoadSystemLib(advapi32dll[:])
+ if a32 == 0 {
+ throw("advapi32.dll not found")
}
- _ProcessPrng = windowsFindfunc(bcryptPrimitives, []byte("ProcessPrng\000"))
+ _RtlGenRandom = windowsFindfunc(a32, []byte("SystemFunction036\000"))

n32 := windowsLoadSystemLib(ntdlldll[:])
if n32 == 0 {
@@ -500,7 +508,7 @@ func osinit() {
//go:nosplit
func readRandom(r []byte) int {
n := 0
- if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+ if stdcall2(_RtlGenRandom, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
n = len(r)
}
return n
--
2.47.0
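The hunk above calls batched(windows.RtlGenRandom, 1<<31-1), a chunking helper that upstream Go deleted along with the RtlGenRandom path, so the fork has to carry one alongside this patch. A minimal sketch of such a helper, assuming the signature the hunk implies (upstream's equivalent used to live in crypto/rand's rand_batched.go); illustrative only, not the fork's verbatim code:

package rand // sketch: would sit next to rand_windows.go in crypto/rand

// batched returns a function that calls f to fill a []byte in chunks of at
// most readMax bytes, so no single request exceeds what the underlying
// generator accepts.
func batched(f func([]byte) error, readMax int) func([]byte) error {
    return func(out []byte) error {
        for len(out) > 0 {
            read := len(out)
            if read > readMax {
                read = readMax
            }
            if err := f(out[:read]); err != nil {
                return err
            }
            out = out[read:]
        }
        return nil
    }
}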
patches/0002-Restore-GOPATH-mode-get.patch (new file, 2456 lines)
File diff suppressed because it is too large.
patches/0003-Restore-related-GOPATH-mode-go-get-functions.patch (new file, 211 lines)
@@ -0,0 +1,211 @@
From 3593bfc89de341818aefadf365ca615b78a8c958 Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sun, 22 Sep 2024 00:34:20 +1000
Subject: [PATCH] Restore related GOPATH-mode go get functions

---
 src/cmd/go/internal/load/pkg.go | 59 +++++++++++++++++++++++++++++++++
 src/cmd/go/internal/par/work.go | 38 +++++++++++++++++++++
 src/cmd/go/internal/vcs/vcs.go | 39 +++++++++++++++++++---
 3 files changed, 132 insertions(+), 4 deletions(-)

diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index 7c402b4..cb38b53 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -604,6 +604,51 @@ func (sp *ImportStack) shorterThan(t []string) bool {
// we return the same pointer each time.
var packageCache = map[string]*Package{}

+// ClearPackageCache clears the in-memory package cache and the preload caches.
+// It is only for use by GOPATH-based "go get".
+// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function.
+func ClearPackageCache() {
+ clear(packageCache)
+ resolvedImportCache.Clear()
+ packageDataCache.Clear()
+}
+
+// ClearPackageCachePartial clears packages with the given import paths from the
+// in-memory package cache and the preload caches. It is only for use by
+// GOPATH-based "go get".
+// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function.
+func ClearPackageCachePartial(args []string) {
+ shouldDelete := make(map[string]bool)
+ for _, arg := range args {
+ shouldDelete[arg] = true
+ if p := packageCache[arg]; p != nil {
+ delete(packageCache, arg)
+ }
+ }
+ resolvedImportCache.DeleteIf(func(key importSpec) bool {
+ return shouldDelete[key.path]
+ })
+ packageDataCache.DeleteIf(func(key string) bool {
+ return shouldDelete[key]
+ })
+}
+
+// ReloadPackageNoFlags is like LoadImport but makes sure
+// not to use the package cache.
+// It is only for use by GOPATH-based "go get".
+// TODO(rsc): When GOPATH-based "go get" is removed, delete this function.
+func ReloadPackageNoFlags(arg string, stk *ImportStack) *Package {
+ p := packageCache[arg]
+ if p != nil {
+ delete(packageCache, arg)
+ resolvedImportCache.DeleteIf(func(key importSpec) bool {
+ return key.path == p.ImportPath
+ })
+ packageDataCache.Delete(p.ImportPath)
+ }
+ return LoadPackage(context.TODO(), PackageOpts{}, arg, base.Cwd(), stk, nil, 0)
+}
+
// dirToImportPath returns the pseudo-import path we use for a package
// outside the Go path. It begins with _/ and then contains the full path
// to the directory. If the package lives in c:\home\gopher\my\pkg then
@@ -655,6 +700,20 @@ const (
cmdlinePkgLiteral
)

+// LoadImport scans the directory named by path, which must be an import path,
+// but possibly a local import path (an absolute file system path or one beginning
+// with ./ or ../). A local relative path is interpreted relative to srcDir.
+// It returns a *Package describing the package found in that directory.
+// LoadImport does not set tool flags and should only be used by
+// this package, as part of a bigger load operation, and by GOPATH-based "go get".
+// TODO(rsc): When GOPATH-based "go get" is removed, unexport this function.
+// The returned PackageError, if any, describes why parent is not allowed
+// to import the named package, with the error referring to importPos.
+// The PackageError can only be non-nil when parent is not nil.
+func LoadImport(ctx context.Context, opts PackageOpts, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+ return loadImport(ctx, opts, nil, path, srcDir, parent, stk, importPos, mode)
+}
+
// LoadPackage does Load import, but without a parent package load contezt
func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode)
diff --git a/src/cmd/go/internal/par/work.go b/src/cmd/go/internal/par/work.go
index 881b51b..3f1e69a 100644
--- a/src/cmd/go/internal/par/work.go
+++ b/src/cmd/go/internal/par/work.go
@@ -180,3 +180,41 @@ func (c *Cache[K, V]) Get(key K) (V, bool) {
}
return e.result, true
}
+
+// Clear removes all entries in the cache.
+//
+// Concurrent calls to Get may return old values. Concurrent calls to Do
+// may return old values or store results in entries that have been deleted.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) Clear() {
+ c.m.Clear()
+}
+
+// Delete removes an entry from the map. It is safe to call Delete for an
+// entry that does not exist. Delete will return quickly, even if the result
+// for a key is still being computed; the computation will finish, but the
+// result won't be accessible through the cache.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) Delete(key K) {
+ c.m.Delete(key)
+}
+
+// DeleteIf calls pred for each key in the map. If pred returns true for a key,
+// DeleteIf removes the corresponding entry. If the result for a key is
+// still being computed, DeleteIf will remove the entry without waiting for
+// the computation to finish. The result won't be accessible through the cache.
+//
+// TODO(jayconrod): Delete this after the package cache clearing functions
+// in internal/load have been removed.
+func (c *Cache[K, V]) DeleteIf(pred func(key K) bool) {
+ c.m.Range(func(key, _ any) bool {
+ if key := key.(K); pred(key) {
+ c.Delete(key)
+ }
+ return true
+ })
+}
diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go
index 19a6a5e..044d02e 100644
--- a/src/cmd/go/internal/vcs/vcs.go
+++ b/src/cmd/go/internal/vcs/vcs.go
@@ -1013,11 +1013,11 @@ var defaultGOVCS = govcsConfig{
{"public", []string{"git", "hg"}},
}

-// checkGOVCS checks whether the policy defined by the environment variable
+// CheckGOVCS checks whether the policy defined by the environment variable
// GOVCS allows the given vcs command to be used with the given repository
// root path. Note that root may not be a real package or module path; it's
// the same as the root path in the go-import meta tag.
-func checkGOVCS(vcs *Cmd, root string) error {
+func CheckGOVCS(vcs *Cmd, root string) error {
if vcs == vcsMod {
// Direct module (proxy protocol) fetches don't
// involve an external version control system
@@ -1045,6 +1045,37 @@ func checkGOVCS(vcs *Cmd, root string) error {
return nil
}

+// CheckNested checks for an incorrectly-nested VCS-inside-VCS
+// situation for dir, checking parents up until srcRoot.
+func CheckNested(vcs *Cmd, dir, srcRoot string) error {
+ if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
+ return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
+ }
+
+ otherDir := dir
+ for len(otherDir) > len(srcRoot) {
+ for _, otherVCS := range vcsList {
+ if isVCSRoot(otherDir, otherVCS.RootNames) {
+ // Allow expected vcs in original dir.
+ if otherDir == dir && otherVCS == vcs {
+ continue
+ }
+ // Otherwise, we have one VCS inside a different VCS.
+ return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.Cmd, otherDir, otherVCS.Cmd)
+ }
+ }
+ // Move to parent.
+ newDir := filepath.Dir(otherDir)
+ if len(newDir) >= len(otherDir) {
+ // Shouldn't happen, but just in case, stop.
+ break
+ }
+ otherDir = newDir
+ }
+
+ return nil
+}
+
// RepoRoot describes the repository root for a tree of source code.
type RepoRoot struct {
Repo string // repository URL, including scheme
@@ -1160,7 +1191,7 @@ func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths
if vcs == nil {
return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
}
- if err := checkGOVCS(vcs, match["root"]); err != nil {
+ if err := CheckGOVCS(vcs, match["root"]); err != nil {
return nil, err
}
var repoURL string
@@ -1349,7 +1380,7 @@ func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.Se
}
}

- if err := checkGOVCS(vcs, mmi.Prefix); err != nil {
+ if err := CheckGOVCS(vcs, mmi.Prefix); err != nil {
return nil, err
}

--
2.47.0
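The par.Cache methods restored above (Clear, Delete, DeleteIf) exist so the GOPATH-mode code can evict stale package entries. A self-contained illustration of the DeleteIf semantics over a sync.Map-backed generic cache (illustrative types, not cmd/go's internals):

package main

import (
    "fmt"
    "sync"
)

type Cache[K comparable, V any] struct{ m sync.Map }

func (c *Cache[K, V]) Put(key K, val V) { c.m.Store(key, val) }
func (c *Cache[K, V]) Delete(key K)     { c.m.Delete(key) }

// DeleteIf removes every entry whose key satisfies pred, mirroring the
// semantics the patch adds to par.Cache.
func (c *Cache[K, V]) DeleteIf(pred func(key K) bool) {
    c.m.Range(func(key, _ any) bool {
        if k := key.(K); pred(k) {
            c.Delete(k)
        }
        return true
    })
}

func main() {
    var c Cache[string, int]
    c.Put("golang.org/x/tools", 1)
    c.Put("example.com/app", 2)
    // Evict one import path, as ClearPackageCachePartial does for its args.
    c.DeleteIf(func(k string) bool { return k == "example.com/app" })
    c.m.Range(func(k, v any) bool { fmt.Println(k, v); return true })
}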
patches/0004-Add-back-LoadLibraryA-fallback.patch (new file, 307 lines)
@@ -0,0 +1,307 @@
From 3e1a3a3c96117fd4d655dd85d2e2c807e691104e Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Tue, 24 Dec 2024 19:31:25 +0700
Subject: [PATCH] Add back LoadLibraryA fallback

---
 src/runtime/export_windows_test.go | 4 ++
 src/runtime/os_windows.go | 60 ++++++++++++++++++++++++++++-
 src/runtime/syscall_windows.go | 17 +++++++-
 src/runtime/syscall_windows_test.go | 23 ++++++++++-
 src/syscall/dll_windows.go | 28 +++++++++++++-
 src/syscall/security_windows.go | 1 +
 src/syscall/zsyscall_windows.go | 10 +++++
 7 files changed, 136 insertions(+), 7 deletions(-)

diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go
index 4880e62..8bfff0b 100644
--- a/src/runtime/export_windows_test.go
+++ b/src/runtime/export_windows_test.go
@@ -36,3 +36,7 @@ func NewContextStub() *ContextStub {
ctx.set_fp(getcallerfp())
return &ContextStub{ctx}
}
+
+func LoadLibraryExStatus() (useEx, haveEx, haveFlags bool) {
+ return useLoadLibraryEx, _LoadLibraryExW != nil, _AddDllDirectory != nil
+}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 0273580..c76df9d 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -41,6 +41,7 @@ const (
//go:cgo_import_dynamic runtime._SetThreadContext SetThreadContext%2 "kernel32.dll"
//go:cgo_import_dynamic runtime._LoadLibraryExW LoadLibraryExW%3 "kernel32.dll"
//go:cgo_import_dynamic runtime._LoadLibraryW LoadLibraryW%1 "kernel32.dll"
+//go:cgo_import_dynamic runtime._LoadLibraryA LoadLibraryA%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._PostQueuedCompletionStatus PostQueuedCompletionStatus%4 "kernel32.dll"
//go:cgo_import_dynamic runtime._QueryPerformanceCounter QueryPerformanceCounter%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._QueryPerformanceFrequency QueryPerformanceFrequency%1 "kernel32.dll"
@@ -74,6 +75,7 @@ var (
// Following syscalls are available on every Windows PC.
// All these variables are set by the Windows executable
// loader before the Go program starts.
+ _AddDllDirectory,
_AddVectoredContinueHandler,
_AddVectoredExceptionHandler,
_CloseHandle,
@@ -99,6 +101,7 @@ var (
_SetThreadContext,
_LoadLibraryExW,
_LoadLibraryW,
+ _LoadLibraryA,
_PostQueuedCompletionStatus,
_QueryPerformanceCounter,
_QueryPerformanceFrequency,
@@ -157,7 +160,6 @@ var (
ntdlldll = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0}
powrprofdll = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0}
winmmdll = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0}
- ws2_32dll = [...]uint16{'w', 's', '2', '_', '3', '2', '.', 'd', 'l', 'l', 0}
)

// Function to be called by windows CreateThread
@@ -253,7 +255,36 @@ func windows_GetSystemDirectory() string {
}

func windowsLoadSystemLib(name []uint16) uintptr {
- return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+ if useLoadLibraryEx {
+ return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+ } else {
+ var nameBytes [_MAX_PATH]byte
+ n := len(name)
+ if n > len(nameBytes) {
+ n = len(nameBytes)
+ }
+ for i := 0; i < n && name[i] != 0; i++ {
+ nameBytes[i] = byte(name[i])
+ }
+
+ // Construct the full path
+ var fullPath [_MAX_PATH]byte
+ copy(fullPath[:], sysDirectory[:sysDirectoryLen])
+ pathLen := sysDirectoryLen
+ for i := 0; i < len(nameBytes) && nameBytes[i] != 0 && pathLen < _MAX_PATH; i++ {
+ fullPath[pathLen] = nameBytes[i]
+ pathLen++
+ }
+
+ // Ensure null-termination
+ if pathLen < _MAX_PATH {
+ fullPath[pathLen] = 0
+ } else {
+ fullPath[_MAX_PATH-1] = 0
+ }
+
+ return stdcall1(_LoadLibraryA, uintptr(unsafe.Pointer(&fullPath[0])))
+ }
}

//go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
@@ -271,6 +302,15 @@ func windows_QueryPerformanceFrequency() int64 {
}

func loadOptionalSyscalls() {
+ var kernel32dll = []byte("kernel32.dll\000")
+ k32 := stdcall1(_LoadLibraryA, uintptr(unsafe.Pointer(&kernel32dll[0])))
+ if k32 == 0 {
+ throw("kernel32.dll not found")
+ }
+ _AddDllDirectory = windowsFindfunc(k32, []byte("AddDllDirectory\000"))
+ _LoadLibraryExW = windowsFindfunc(k32, []byte("LoadLibraryExW\000"))
+ useLoadLibraryEx = (_LoadLibraryExW != nil && _AddDllDirectory != nil)
+
a32 := windowsLoadSystemLib(advapi32dll[:])
if a32 == 0 {
throw("advapi32.dll not found")
@@ -365,6 +405,22 @@ const (
// in sys_windows_386.s and sys_windows_amd64.s:
func getlasterror() uint32

+// When loading DLLs, we prefer to use LoadLibraryEx with
+// LOAD_LIBRARY_SEARCH_* flags, if available. LoadLibraryEx is not
+// available on old Windows, though, and the LOAD_LIBRARY_SEARCH_*
+// flags are not available on some versions of Windows without a
+// security patch.
+//
+// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
+// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
+// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
+// systems that have KB2533623 installed. To determine whether the
+// flags are available, use GetProcAddress to get the address of the
+// AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories
+// function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_*
+// flags can be used with LoadLibraryEx."
+var useLoadLibraryEx bool
+
var timeBeginPeriodRetValue uint32

// osRelaxMinNS indicates that sysmon shouldn't osRelax if the next
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 85b1b8c..eb808fe 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -413,10 +413,23 @@ func callbackWrap(a *callbackArgs) {

const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800

+// When available, this function will use LoadLibraryEx with the filename
+// parameter and the important SEARCH_SYSTEM32 argument. But on systems that
+// do not have that option, absoluteFilepath should contain a fallback
+// to the full path inside of system32 for use with vanilla LoadLibrary.
+//
//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
-func syscall_loadsystemlibrary(filename *uint16) (handle, err uintptr) {
- handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryExW)), uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+func syscall_loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle, err uintptr) {
+ if useLoadLibraryEx {
+ handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryExW)), uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+ } else {
+ handle, _, err = syscall_SyscallN(
+ uintptr(unsafe.Pointer(_LoadLibraryW)),
+ uintptr(unsafe.Pointer(absoluteFilepath)),
+ )
+ }
KeepAlive(filename)
+ KeepAlive(absoluteFilepath)
if handle != 0 {
err = 0
}
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go
index 156cf3e..2db5b61 100644
--- a/src/runtime/syscall_windows_test.go
+++ b/src/runtime/syscall_windows_test.go
@@ -1166,7 +1166,10 @@ uintptr_t cfunc(void) {
dll, err = syscall.LoadDLL(name)
if err == nil {
dll.Release()
- t.Fatalf("Bad: insecure load of DLL by base name %q before sysdll registration: %v", name, err)
+ if wantLoadLibraryEx() {
+ t.Fatalf("Bad: insecure load of DLL by base name %q before sysdll registration: %v", name, err)
+ }
+ t.Skip("insecure load of DLL, but expected")
}
}

@@ -1219,6 +1222,24 @@ func TestSyscallStackUsage(t *testing.T) {
syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
}

+// wantLoadLibraryEx reports whether we expect LoadLibraryEx to work for tests.
+func wantLoadLibraryEx() bool {
+ return testenv.Builder() != "" && (runtime.GOARCH == "amd64" || runtime.GOARCH == "386")
+}
+
+func TestLoadLibraryEx(t *testing.T) {
+ use, have, flags := runtime.LoadLibraryExStatus()
+ if use {
+ return // success.
+ }
+ if wantLoadLibraryEx() {
+ t.Fatalf("Expected LoadLibraryEx+flags to be available. (LoadLibraryEx=%v; flags=%v)",
+ have, flags)
+ }
+ t.Skipf("LoadLibraryEx not usable, but not expected. (LoadLibraryEx=%v; flags=%v)",
+ have, flags)
+}
+
var (
modwinmm = syscall.NewLazyDLL("winmm.dll")
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go
index a7873e6..bd82b51 100644
--- a/src/syscall/dll_windows.go
+++ b/src/syscall/dll_windows.go
@@ -45,7 +45,7 @@ func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
//go:noescape
func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
func loadlibrary(filename *uint16) (handle uintptr, err Errno)
-func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno)
+func loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle uintptr, err Errno)
func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err Errno)

// A DLL implements access to a single DLL.
@@ -54,6 +54,26 @@ type DLL struct {
Handle Handle
}

+// We use this for computing the absolute path for system DLLs on systems
+// where SEARCH_SYSTEM32 is not available.
+var systemDirectoryPrefix string
+
+func init() {
+ n := uint32(MAX_PATH)
+ for {
+ b := make([]uint16, n)
+ l, e := getSystemDirectory(&b[0], n)
+ if e != nil {
+ panic("Unable to determine system directory: " + e.Error())
+ }
+ if l <= n {
+ systemDirectoryPrefix = UTF16ToString(b[:l]) + "\\"
+ break
+ }
+ n = l
+ }
+}
+
// LoadDLL loads the named DLL file into memory.
//
// If name is not an absolute path and is not a known system DLL used by
@@ -70,7 +90,11 @@ func LoadDLL(name string) (*DLL, error) {
var h uintptr
var e Errno
if sysdll.IsSystemDLL[name] {
- h, e = loadsystemlibrary(namep)
+ absoluteFilepathp, err := UTF16PtrFromString(systemDirectoryPrefix + name)
+ if err != nil {
+ return nil, err
+ }
+ h, e = loadsystemlibrary(namep, absoluteFilepathp)
} else {
h, e = loadlibrary(namep)
}
diff --git a/src/syscall/security_windows.go b/src/syscall/security_windows.go
index 4e988c4..45b1908 100644
--- a/src/syscall/security_windows.go
+++ b/src/syscall/security_windows.go
@@ -290,6 +290,7 @@ type Tokenprimarygroup struct {
//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW

// An access token contains the security information for a logon session.
// The system creates an access token when a user logs on, and every
diff --git a/src/syscall/zsyscall_windows.go b/src/syscall/zsyscall_windows.go
index d8d8594..28369e3 100644
--- a/src/syscall/zsyscall_windows.go
+++ b/src/syscall/zsyscall_windows.go
@@ -128,6 +128,7 @@ var (
procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW")
procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW")
procGetStdHandle = modkernel32.NewProc("GetStdHandle")
+ procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW")
procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime")
procGetTempPathW = modkernel32.NewProc("GetTempPathW")
procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation")
@@ -870,6 +871,15 @@ func GetStdHandle(stdhandle int) (handle Handle, err error) {
return
}

+func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
+ r0, _, e1 := Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ len = uint32(r0)
+ if len == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetSystemTimeAsFileTime(time *Filetime) {
Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
return
--
2.39.5
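The MSDN note quoted in this patch is checkable from ordinary user code: if GetProcAddress can resolve AddDllDirectory, the LOAD_LIBRARY_SEARCH_* flags are usable with LoadLibraryEx. A Windows-only sketch using the public syscall package rather than the runtime's private loader state:

package main

import (
    "fmt"
    "syscall"
)

func main() {
    // Find() performs the GetProcAddress probe the comment describes.
    k32 := syscall.NewLazyDLL("kernel32.dll")
    err := k32.NewProc("AddDllDirectory").Find()
    fmt.Println("LOAD_LIBRARY_SEARCH_* flags available:", err == nil)
}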
patches/0005-Add-Windows-7-console-handle-workaround.patch (new file, 66 lines)
@@ -0,0 +1,66 @@
From 60f9e8454df41affe07266e795f8a1d22567fd3e Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sat, 5 Oct 2024 14:17:43 +1000
Subject: [PATCH] Add Windows 7 console handle workaround (revert 48042aa)

---
 src/syscall/exec_windows.go | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go
index 1220de4..815dfd6 100644
--- a/src/syscall/exec_windows.go
+++ b/src/syscall/exec_windows.go
@@ -317,6 +317,17 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle
}
}

+ var maj, min, build uint32
+ rtlGetNtVersionNumbers(&maj, &min, &build)
+ isWin7 := maj < 6 || (maj == 6 && min <= 1)
+ // NT kernel handles are divisible by 4, with the bottom 3 bits left as
+ // a tag. The fully set tag correlates with the types of handles we're
+ // concerned about here. Except, the kernel will interpret some
+ // special handle values, like -1, -2, and so forth, so kernelbase.dll
+ // checks to see that those bottom three bits are checked, but that top
+ // bit is not checked.
+ isLegacyWin7ConsoleHandle := func(handle Handle) bool { return isWin7 && handle&0x10000003 == 3 }
+
p, _ := GetCurrentProcess()
parentProcess := p
if sys.ParentProcess != 0 {
@@ -325,7 +336,15 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle
fd := make([]Handle, len(attr.Files))
for i := range attr.Files {
if attr.Files[i] > 0 {
- err := DuplicateHandle(p, Handle(attr.Files[i]), parentProcess, &fd[i], 0, true, DUPLICATE_SAME_ACCESS)
+ destinationProcessHandle := parentProcess
+
+ // On Windows 7, console handles aren't real handles, and can only be duplicated
+ // into the current process, not a parent one, which amounts to the same thing.
+ if parentProcess != p && isLegacyWin7ConsoleHandle(Handle(attr.Files[i])) {
+ destinationProcessHandle = p
+ }
+
+ err := DuplicateHandle(p, Handle(attr.Files[i]), destinationProcessHandle, &fd[i], 0, true, DUPLICATE_SAME_ACCESS)
if err != nil {
return 0, 0, err
}
@@ -356,6 +375,14 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle

fd = append(fd, sys.AdditionalInheritedHandles...)

+ // On Windows 7, console handles aren't real handles, so don't pass them
+ // through to PROC_THREAD_ATTRIBUTE_HANDLE_LIST.
+ for i := range fd {
+ if isLegacyWin7ConsoleHandle(fd[i]) {
+ fd[i] = 0
+ }
+ }
+
// The presence of a NULL handle in the list is enough to cause PROC_THREAD_ATTRIBUTE_HANDLE_LIST
// to treat the entire list as empty, so remove NULL handles.
j := 0
--
2.47.0
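The mask in isLegacyWin7ConsoleHandle is easy to verify by hand: Windows 7 console pseudo-handles have the low two bits set (0x3, 0x7, 0xb, ...), real kernel handles are divisible by 4, and special values such as INVALID_HANDLE_VALUE (-1) carry high bits that the 0x10000003 mask catches. A small standalone check, with sample values chosen purely for illustration:

package main

import "fmt"

// Same predicate as the patch, minus the Windows-7 version gate.
func isLegacyWin7ConsoleHandle(h uintptr) bool { return h&0x10000003 == 3 }

func main() {
    samples := []uintptr{0x3, 0x7, 0x10, ^uintptr(0)} // last value is -1 (INVALID_HANDLE_VALUE)
    for _, h := range samples {
        fmt.Printf("%#x -> %v\n", h, isLegacyWin7ConsoleHandle(h))
    }
    // Prints true, true, false, false.
}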
patches/0006-Add-sysSocket-fallback.patch (new file, 177 lines)
@@ -0,0 +1,177 @@
From 0468b8b0addf825a274d81630087d62db495a562 Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sat, 5 Oct 2024 14:27:19 +1000
Subject: [PATCH] Add sysSocket fallback (revert 7c1157f)

---
 src/net/hook_windows.go | 1 +
 src/net/internal/socktest/main_test.go | 2 +-
 .../internal/socktest/main_windows_test.go | 22 ++++++++++++++
 src/net/internal/socktest/sys_windows.go | 29 +++++++++++++++++++
 src/net/main_windows_test.go | 3 ++
 src/net/sock_windows.go | 14 +++++++++
 src/syscall/exec_windows.go | 1 -
 7 files changed, 70 insertions(+), 2 deletions(-)
 create mode 100644 src/net/internal/socktest/main_windows_test.go

diff --git a/src/net/hook_windows.go b/src/net/hook_windows.go
index f7c5b5a..6b82be5 100644
--- a/src/net/hook_windows.go
+++ b/src/net/hook_windows.go
@@ -13,6 +13,7 @@ var (
hostsFilePath = windows.GetSystemDirectory() + "/Drivers/etc/hosts"

// Placeholders for socket system calls.
+ socketFunc func(int, int, int) (syscall.Handle, error) = syscall.Socket
wsaSocketFunc func(int32, int32, int32, *syscall.WSAProtocolInfo, uint32, uint32) (syscall.Handle, error) = windows.WSASocket
connectFunc func(syscall.Handle, syscall.Sockaddr) error = syscall.Connect
listenFunc func(syscall.Handle, int) error = syscall.Listen
diff --git a/src/net/internal/socktest/main_test.go b/src/net/internal/socktest/main_test.go
index 967ce67..0197feb 100644
--- a/src/net/internal/socktest/main_test.go
+++ b/src/net/internal/socktest/main_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-//go:build !js && !plan9 && !wasip1 && !windows
+//go:build !js && !plan9 && !wasip1

package socktest_test

diff --git a/src/net/internal/socktest/main_windows_test.go b/src/net/internal/socktest/main_windows_test.go
new file mode 100644
index 0000000..df1cb97
--- /dev/null
+++ b/src/net/internal/socktest/main_windows_test.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package socktest_test
+
+import "syscall"
+
+var (
+ socketFunc func(int, int, int) (syscall.Handle, error)
+ closeFunc func(syscall.Handle) error
+)
+
+func installTestHooks() {
+ socketFunc = sw.Socket
+ closeFunc = sw.Closesocket
+}
+
+func uninstallTestHooks() {
+ socketFunc = syscall.Socket
+ closeFunc = syscall.Closesocket
+}
diff --git a/src/net/internal/socktest/sys_windows.go b/src/net/internal/socktest/sys_windows.go
index 2f02446..2b89362 100644
--- a/src/net/internal/socktest/sys_windows.go
+++ b/src/net/internal/socktest/sys_windows.go
@@ -9,6 +9,35 @@ import (
"syscall"
)

+// Socket wraps syscall.Socket.
+func (sw *Switch) Socket(family, sotype, proto int) (s syscall.Handle, err error) {
+ sw.once.Do(sw.init)
+ so := &Status{Cookie: cookie(family, sotype, proto)}
+ sw.fmu.RLock()
+ f, _ := sw.fltab[FilterSocket]
+ sw.fmu.RUnlock()
+ af, err := f.apply(so)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ s, so.Err = syscall.Socket(family, sotype, proto)
+ if err = af.apply(so); err != nil {
+ if so.Err == nil {
+ syscall.Closesocket(s)
+ }
+ return syscall.InvalidHandle, err
+ }
+ sw.smu.Lock()
+ defer sw.smu.Unlock()
+ if so.Err != nil {
+ sw.stats.getLocked(so.Cookie).OpenFailed++
+ return syscall.InvalidHandle, so.Err
+ }
+ nso := sw.addLocked(s, family, sotype, proto)
+ sw.stats.getLocked(nso.Cookie).Opened++
+ return s, nil
+}
+
// WSASocket wraps [syscall.WSASocket].
func (sw *Switch) WSASocket(family, sotype, proto int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (s syscall.Handle, err error) {
sw.once.Do(sw.init)
diff --git a/src/net/main_windows_test.go b/src/net/main_windows_test.go
index bc024c0..07f21b7 100644
--- a/src/net/main_windows_test.go
+++ b/src/net/main_windows_test.go
@@ -8,6 +8,7 @@ import "internal/poll"

var (
// Placeholders for saving original socket system calls.
+ origSocket = socketFunc
origWSASocket = wsaSocketFunc
origClosesocket = poll.CloseFunc
origConnect = connectFunc
@@ -17,6 +18,7 @@ var (
)

func installTestHooks() {
+ socketFunc = sw.Socket
wsaSocketFunc = sw.WSASocket
poll.CloseFunc = sw.Closesocket
connectFunc = sw.Connect
@@ -26,6 +28,7 @@ func installTestHooks() {
}

func uninstallTestHooks() {
+ socketFunc = origSocket
wsaSocketFunc = origWSASocket
poll.CloseFunc = origClosesocket
connectFunc = origConnect
diff --git a/src/net/sock_windows.go b/src/net/sock_windows.go
index a519909..ebdf4c3 100644
--- a/src/net/sock_windows.go
+++ b/src/net/sock_windows.go
@@ -20,6 +20,20 @@ func maxListenerBacklog() int {
func sysSocket(family, sotype, proto int) (syscall.Handle, error) {
s, err := wsaSocketFunc(int32(family), int32(sotype), int32(proto),
nil, 0, windows.WSA_FLAG_OVERLAPPED|windows.WSA_FLAG_NO_HANDLE_INHERIT)
+ if err == nil {
+ return s, nil
+ }
+ // WSA_FLAG_NO_HANDLE_INHERIT flag is not supported on some
+ // old versions of Windows, see
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms742212(v=vs.85).aspx
+ // for details. Just use syscall.Socket, if windows.WSASocket failed.
+ // See ../syscall/exec_unix.go for description of ForkLock.
+ syscall.ForkLock.RLock()
+ s, err = socketFunc(family, sotype, proto)
+ if err == nil {
+ syscall.CloseOnExec(s)
+ }
+ syscall.ForkLock.RUnlock()
if err != nil {
return syscall.InvalidHandle, os.NewSyscallError("socket", err)
}
diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go
index 815dfd6..d197380 100644
--- a/src/syscall/exec_windows.go
+++ b/src/syscall/exec_windows.go
@@ -14,7 +14,6 @@ import (
"unsafe"
)

-// ForkLock is not used on Windows.
var ForkLock sync.RWMutex

// EscapeArg rewrites command line argument s as prescribed
--
2.47.0
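For context on the fallback path above: when WSA_FLAG_NO_HANDLE_INHERIT is unsupported, the code creates the socket the old way and then calls syscall.CloseOnExec, which on Windows clears the handle's inheritance bit after the fact (the residual race window is why the modern flag is preferred). A one-function sketch of that step using the public API:

package main

import "syscall"

// markNonInheritable is what syscall.CloseOnExec amounts to on Windows.
func markNonInheritable(h syscall.Handle) error {
    return syscall.SetHandleInformation(h, syscall.HANDLE_FLAG_INHERIT, 0)
}

func main() {}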
patches/0007-Add-Windows-version-info-to-syscall.patch (new file, 82 lines)
@@ -0,0 +1,82 @@
From d97201a53d5ec76fa81b091bc0d4d64f6ff6ff8c Mon Sep 17 00:00:00 2001
From: Vorapol Rinsatitnon <vorapol.r@pm.me>
Date: Sat, 5 Oct 2024 15:10:54 +1000
Subject: [PATCH] Add Windows version info to syscall

---
 src/syscall/exec_windows.go | 7 ++++---
 src/syscall/types_windows.go | 10 ++++++++++
 src/syscall/zsyscall_windows.go | 7 +++++++
 3 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/src/syscall/exec_windows.go b/src/syscall/exec_windows.go
index d197380..f099a6f 100644
--- a/src/syscall/exec_windows.go
+++ b/src/syscall/exec_windows.go
@@ -316,9 +316,10 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle
}
}

- var maj, min, build uint32
- rtlGetNtVersionNumbers(&maj, &min, &build)
- isWin7 := maj < 6 || (maj == 6 && min <= 1)
+ info := _OSVERSIONINFOW{}
+ info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
+ rtlGetVersion(&info)
+ isWin7 := info.majorVersion < 6 || (info.majorVersion == 6 && info.minorVersion <= 1)
// NT kernel handles are divisible by 4, with the bottom 3 bits left as
// a tag. The fully set tag correlates with the types of handles we're
// concerned about here. Except, the kernel will interpret some
diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go
index 6743675..37d0eff 100644
--- a/src/syscall/types_windows.go
+++ b/src/syscall/types_windows.go
@@ -1169,3 +1169,13 @@ const (
)

const UNIX_PATH_MAX = 108 // defined in afunix.h
+
+// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_osversioninfow
+type _OSVERSIONINFOW struct {
+ osVersionInfoSize uint32
+ majorVersion uint32
+ minorVersion uint32
+ buildNumber uint32
+ platformId uint32
+ csdVersion [128]uint16
+}
diff --git a/src/syscall/zsyscall_windows.go b/src/syscall/zsyscall_windows.go
index 28369e3..a47b090 100644
--- a/src/syscall/zsyscall_windows.go
+++ b/src/syscall/zsyscall_windows.go
@@ -43,6 +43,7 @@ var (
modkernel32 = NewLazyDLL(sysdll.Add("kernel32.dll"))
modmswsock = NewLazyDLL(sysdll.Add("mswsock.dll"))
modnetapi32 = NewLazyDLL(sysdll.Add("netapi32.dll"))
+ modntdll = NewLazyDLL(sysdll.Add("ntdll.dll"))
modsecur32 = NewLazyDLL(sysdll.Add("secur32.dll"))
modshell32 = NewLazyDLL(sysdll.Add("shell32.dll"))
moduserenv = NewLazyDLL(sysdll.Add("userenv.dll"))
@@ -169,6 +170,7 @@ var (
procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation")
procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo")
procGetUserNameExW = modsecur32.NewProc("GetUserNameExW")
+ procRtlGetVersion = modntdll.NewProc("RtlGetVersion")
procTranslateNameW = modsecur32.NewProc("TranslateNameW")
procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW")
procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW")
@@ -1228,6 +1230,11 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
return
}

+func rtlGetVersion(info *_OSVERSIONINFOW) {
+ Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ return
+}
+
func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
r1, _, e1 := Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
if r1&0xff == 0 {
--
2.47.0
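RtlGetVersion is used here because, unlike GetVersionExW, it is not subject to manifest-based version lying, so it reports the real OS version. A user-level, Windows-only sketch of the same call via ntdll (illustrative, not the private syscall.rtlGetVersion added above):

package main

import (
    "fmt"
    "syscall"
    "unsafe"
)

// Mirrors the _OSVERSIONINFOW layout from the patch.
type osVersionInfoW struct {
    osVersionInfoSize uint32
    majorVersion      uint32
    minorVersion      uint32
    buildNumber       uint32
    platformId        uint32
    csdVersion        [128]uint16
}

func main() {
    proc := syscall.NewLazyDLL("ntdll.dll").NewProc("RtlGetVersion")
    var info osVersionInfoW
    info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
    proc.Call(uintptr(unsafe.Pointer(&info))) // returns an NTSTATUS; 0 means success
    isWin7OrOlder := info.majorVersion < 6 || (info.majorVersion == 6 && info.minorVersion <= 1)
    fmt.Printf("Windows %d.%d (build %d), Windows 7 or older: %v\n",
        info.majorVersion, info.minorVersion, info.buildNumber, isWin7OrOlder)
}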
@@ -33,7 +33,7 @@ import (
"unicode"
)

-var globalSkip = func(t *testing.T) {}
+var globalSkip = func(t testing.TB) {}

// Program to run.
var bin []string
@@ -59,12 +59,12 @@ func TestMain(m *testing.M) {

func testMain(m *testing.M) int {
if testing.Short() && os.Getenv("GO_BUILDER_NAME") == "" {
- globalSkip = func(t *testing.T) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
+ globalSkip = func(t testing.TB) { t.Skip("short mode and $GO_BUILDER_NAME not set") }
return m.Run()
}
if runtime.GOOS == "linux" {
if _, err := os.Stat("/etc/alpine-release"); err == nil {
- globalSkip = func(t *testing.T) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
+ globalSkip = func(t testing.TB) { t.Skip("skipping failing test on alpine - go.dev/issue/19938") }
return m.Run()
}
}
@@ -1291,8 +1291,8 @@ func TestPreemption(t *testing.T) {
}
}

-// Issue 59294. Test calling Go function from C after using some
-// stack space.
+// Issue 59294 and 68285. Test calling Go function from C after with
+// various stack space.
func TestDeepStack(t *testing.T) {
globalSkip(t)
testenv.MustHaveGoBuild(t)
@@ -1350,6 +1350,53 @@ func TestDeepStack(t *testing.T) {
}
}

+func BenchmarkCgoCallbackMainThread(b *testing.B) {
+ // Benchmark for calling into Go fron C main thread.
+ // See issue #68587.
+ //
+ // It uses a subprocess, which is a C binary that calls
+ // Go on the main thread b.N times. There is some overhead
+ // for launching the subprocess. It is probably fine when
+ // b.N is large.
+
+ globalSkip(b)
+ testenv.MustHaveGoBuild(b)
+ testenv.MustHaveCGO(b)
+ testenv.MustHaveBuildMode(b, "c-archive")
+
+ if !testWork {
+ defer func() {
+ os.Remove("testp10" + exeSuffix)
+ os.Remove("libgo10.a")
+ os.Remove("libgo10.h")
+ }()
+ }
+
+ cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo10.a", "./libgo10")
+ out, err := cmd.CombinedOutput()
+ b.Logf("%v\n%s", cmd.Args, out)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ ccArgs := append(cc, "-o", "testp10"+exeSuffix, "main10.c", "libgo10.a")
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ b.Logf("%v\n%s", ccArgs, out)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ argv := cmdToRun("./testp10")
+ argv = append(argv, fmt.Sprint(b.N))
+ cmd = exec.Command(argv[0], argv[1:]...)
+
+ b.ResetTimer()
+ err = cmd.Run()
+ if err != nil {
+ b.Fatal(err)
+ }
+}
+
func TestSharedObject(t *testing.T) {
// Test that we can put a Go c-archive into a C shared object.
globalSkip(t)
src/cmd/cgo/internal/testcarchive/testdata/libgo10/a.go (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import "C"

//export GoF
func GoF() {}

func main() {}
@@ -6,9 +6,29 @@ package main

import "runtime"

+// extern void callGoWithVariousStack(int);
import "C"

func main() {}

//export GoF
-func GoF() { runtime.GC() }
+func GoF(p int32) {
+ runtime.GC()
+ if p != 0 {
+ panic("panic")
+ }
+}
+
+//export callGoWithVariousStackAndGoFrame
+func callGoWithVariousStackAndGoFrame(p int32) {
+ if p != 0 {
+ defer func() {
+ e := recover()
+ if e == nil {
+ panic("did not panic")
+ }
+ runtime.GC()
+ }()
+ }
+ C.callGoWithVariousStack(C.int(p));
+}
src/cmd/cgo/internal/testcarchive/testdata/main10.c (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <stdio.h>
#include <stdlib.h>

#include "libgo10.h"

int main(int argc, char **argv) {
 int n, i;

 if (argc != 2) {
  perror("wrong arg");
  return 2;
 }
 n = atoi(argv[1]);
 for (i = 0; i < n; i++)
  GoF();

 return 0;
}
@@ -6,19 +6,27 @@

void use(int *x) { (*x)++; }

-void callGoFWithDeepStack() {
+void callGoFWithDeepStack(int p) {
 int x[10000];

 use(&x[0]);
 use(&x[9999]);

- GoF();
+ GoF(p);

 use(&x[0]);
 use(&x[9999]);
}

-int main() {
- GoF(); // call GoF without using much stack
- callGoFWithDeepStack(); // call GoF with a deep stack
+void callGoWithVariousStack(int p) {
+ GoF(0); // call GoF without using much stack
+ callGoFWithDeepStack(p); // call GoF with a deep stack
+ GoF(0); // again on a shallow stack
+}
+
+int main() {
+ callGoWithVariousStack(0);
+
+ callGoWithVariousStackAndGoFrame(0); // normal execution
+ callGoWithVariousStackAndGoFrame(1); // panic and recover
}
@@ -318,9 +318,10 @@ func containsClosure(f, c *ir.Func) bool {
return false
}

-// Closures within function Foo are named like "Foo.funcN..." or "Foo-rangeN".
-// TODO(mdempsky): Better way to recognize this.
-fn := f.Sym().Name
-cn := c.Sym().Name
-return len(cn) > len(fn) && cn[:len(fn)] == fn && (cn[len(fn)] == '.' || cn[len(fn)] == '-')
+for p := c.ClosureParent; p != nil; p = p.ClosureParent {
+ if p == f {
+ return true
+ }
+}
+return false
}
@@ -582,6 +582,23 @@ func TestIssue25596(t *testing.T) {
compileAndImportPkg(t, "issue25596")
}

+func TestIssue70394(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ pkg := compileAndImportPkg(t, "alias")
+ obj := lookupObj(t, pkg.Scope(), "A")
+
+ typ := obj.Type()
+ if _, ok := typ.(*types2.Alias); !ok {
+ t.Fatalf("type of %s is %s, wanted an alias", obj, typ)
+ }
+}
+
func importPkg(t *testing.T, path, srcDir string) *types2.Package {
pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
if err != nil {
src/cmd/compile/internal/importer/testdata/alias.go (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package testdata

type A = int32
@@ -29,11 +29,9 @@ func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input
pr := pkgReader{
PkgDecoder: input,

- ctxt: ctxt,
- imports: imports,
- // Currently, the compiler panics when using Alias types.
- // TODO(gri) set to true once this is fixed (issue #66873)
- enableAlias: false,
+ ctxt: ctxt,
+ imports: imports,
+ enableAlias: true,

posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
@@ -51,6 +51,8 @@ import (
// the generated ODCLFUNC, but there is no
// pointer from the Func back to the OMETHVALUE.
type Func struct {
+ // if you add or remove a field, don't forget to update sizeof_test.go
+
miniNode
Body Nodes

@@ -76,6 +78,9 @@ type Func struct {
// Populated during walk.
Closures []*Func

+ // Parent of a closure
+ ClosureParent *Func
+
// Parents records the parent scope of each scope within a
// function. The root scope (0) has no parent, so the i'th
// scope's parent is stored at Parents[i-1].
@@ -512,6 +517,7 @@ func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func,

fn.Nname.Defn = fn
pkg.Funcs = append(pkg.Funcs, fn)
+ fn.ClosureParent = outerfn

return fn
}
@@ -20,7 +20,7 @@ func TestSizeof(t *testing.T) {
_32bit uintptr // size on 32bit platforms
_64bit uintptr // size on 64bit platforms
}{
-{Func{}, 176, 296},
+{Func{}, 180, 304},
{Name{}, 96, 168},
}
@@ -2099,3 +2099,27 @@ func TestTwoLevelReturnCheck(t *testing.T) {
		t.Errorf("Expected y=3, got y=%d\n", y)
	}
}

func Bug70035(s1, s2, s3 []string) string {
	var c1 string
	for v1 := range slices.Values(s1) {
		var c2 string
		for v2 := range slices.Values(s2) {
			var c3 string
			for v3 := range slices.Values(s3) {
				c3 = c3 + v3
			}
			c2 = c2 + v2 + c3
		}
		c1 = c1 + v1 + c2
	}
	return c1
}

func Test70035(t *testing.T) {
	got := Bug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"})
	want := "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC"
	if got != want {
		t.Errorf("got %v, want %v", got, want)
	}
}

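The expected string can be derived by recomputing the same accumulation with ordinary range-over-slice loops instead of range-over-func iterators; the fixed compiler must make the closure-based version above agree with this. A small self-contained sketch:

package main

import "fmt"

// wantBug70035 recomputes Test70035's expected string with plain loops:
// c3 is rebuilt as "ABC" on every middle-loop iteration, c2 becomes
// "aABCbABCcABC", and c1 interleaves that after each of "1", "2", "3".
func wantBug70035(s1, s2, s3 []string) string {
	var c1 string
	for _, v1 := range s1 {
		var c2 string
		for _, v2 := range s2 {
			var c3 string
			for _, v3 := range s3 {
				c3 += v3
			}
			c2 += v2 + c3
		}
		c1 += v1 + c2
	}
	return c1
}

func main() {
	got := wantBug70035([]string{"1", "2", "3"}, []string{"a", "b", "c"}, []string{"A", "B", "C"})
	fmt.Println(got == "1aABCbABCcABC2aABCbABCcABC3aABCbABCcABC") // true
}
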
@@ -805,13 +805,19 @@ func elfwritefreebsdsig(out *OutBuf) int {
	return int(sh.Size)
}

func addbuildinfo(val string) {
func addbuildinfo(ctxt *Link) {
	val := *flagHostBuildid
	if val == "gobuildid" {
		buildID := *flagBuildid
		if buildID == "" {
			Exitf("-B gobuildid requires a Go build ID supplied via -buildid")
		}

		if ctxt.IsDarwin() {
			buildinfo = uuidFromGoBuildId(buildID)
			return
		}

		hashedBuildID := notsha256.Sum256([]byte(buildID))
		buildinfo = hashedBuildID[:20]

@@ -821,11 +827,13 @@ func addbuildinfo(val string) {
	if !strings.HasPrefix(val, "0x") {
		Exitf("-B argument must start with 0x: %s", val)
	}

	ov := val
	val = val[2:]

	const maxLen = 32
	maxLen := 32
	if ctxt.IsDarwin() {
		maxLen = 16
	}
	if hex.DecodedLen(len(val)) > maxLen {
		Exitf("-B option too long (max %d digits): %s", maxLen, ov)
	}

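A sketch of the -B validation rule the hunk implements, as a hypothetical standalone helper (names are mine, not the linker's): the value must start with "0x" and decode to at most 32 bytes, or 16 on Darwin, where it has to fit an LC_UUID payload.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// parseHostBuildID mirrors the -B checks in addbuildinfo above.
func parseHostBuildID(val string, darwin bool) ([]byte, error) {
	if !strings.HasPrefix(val, "0x") {
		return nil, fmt.Errorf("-B argument must start with 0x: %s", val)
	}
	digits := val[2:]
	maxLen := 32
	if darwin {
		maxLen = 16 // a Mach-O UUID is 16 bytes
	}
	if hex.DecodedLen(len(digits)) > maxLen {
		return nil, fmt.Errorf("-B option too long (max %d digits): %s", maxLen, val)
	}
	return hex.DecodeString(digits)
}

func main() {
	b, err := parseHostBuildID("0xdeadbeef", true)
	fmt.Println(b, err) // [222 173 190 239] <nil>
}
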
@@ -297,6 +297,8 @@ func getMachoHdr() *MachoHdr {
	return &machohdr
}

// Create a new Mach-O load command. ndata is the number of 32-bit words for
// the data (not including the load command header).
func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad {
	if arch.PtrSize == 8 && (ndata&1 != 0) {
		ndata++
@@ -849,6 +851,20 @@ func asmbMacho(ctxt *Link) {
		}
	}

	if ctxt.IsInternal() && len(buildinfo) > 0 {
		ml := newMachoLoad(ctxt.Arch, LC_UUID, 4)
		// Mach-O UUID is 16 bytes
		if len(buildinfo) < 16 {
			buildinfo = append(buildinfo, make([]byte, 16)...)
		}
		// By default, buildinfo is already in UUIDv3 format
		// (see uuidFromGoBuildId).
		ml.data[0] = ctxt.Arch.ByteOrder.Uint32(buildinfo)
		ml.data[1] = ctxt.Arch.ByteOrder.Uint32(buildinfo[4:])
		ml.data[2] = ctxt.Arch.ByteOrder.Uint32(buildinfo[8:])
		ml.data[3] = ctxt.Arch.ByteOrder.Uint32(buildinfo[12:])
	}

	if ctxt.IsInternal() && ctxt.NeedCodeSign() {
		ml := newMachoLoad(ctxt.Arch, LC_CODE_SIGNATURE, 2)
		ml.data[0] = uint32(codesigOff)

@@ -42,7 +42,7 @@ func uuidFromGoBuildId(buildID string) []byte {
	// to use this UUID flavor than any of the others. This is similar
	// to how other linkers handle this (for example this code in lld:
	// https://github.com/llvm/llvm-project/blob/2a3a79ce4c2149d7787d56f9841b66cacc9061d0/lld/MachO/Writer.cpp#L524).
	rv[6] &= 0xcf
	rv[6] &= 0x0f
	rv[6] |= 0x30
	rv[8] &= 0x3f
	rv[8] |= 0xc0

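A sketch (not the linker's code) of the bit layout this one-byte fix targets. RFC 4122 stores the UUID version in the high nibble of byte 6 and the variant in the top bits of byte 8; the old mask 0xcf (1100_1111) left two of the four version bits behind, while 0x0f clears the whole nibble before OR-ing in version 3.

package main

import "fmt"

func setUUIDBits(rv []byte) {
	rv[6] &= 0x0f // clear the version nibble ...
	rv[6] |= 0x30 // ... then mark version 3 (name-based)
	rv[8] &= 0x3f // clear the variant bits ...
	rv[8] |= 0xc0 // ... and set them as the hunk above does
}

func main() {
	u := make([]byte, 16)
	u[6], u[8] = 0xff, 0xff
	setUUIDBits(u)
	fmt.Printf("%02x %02x\n", u[6], u[8]) // 3f ff: version nibble is 3
}
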
@@ -95,6 +95,7 @@ var (
	flagN = flag.Bool("n", false, "no-op (deprecated)")
	FlagS = flag.Bool("s", false, "disable symbol table")
	flag8 bool // use 64-bit addresses in symbol table
	flagHostBuildid = flag.String("B", "", "set ELF NT_GNU_BUILD_ID `note` or Mach-O UUID; use \"gobuildid\" to generate it from the Go build ID")
	flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker")
	flagCheckLinkname = flag.Bool("checklinkname", true, "check linkname symbol references")
	FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines")
@@ -196,7 +197,6 @@ func Main(arch *sys.Arch, theArch Arch) {
	flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`")
	flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`")
	flag.BoolVar(&ctxt.compressDWARF, "compressdwarf", true, "compress DWARF if possible")
	objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF; use \"gobuildid\" to generate it from the Go build ID", addbuildinfo)
	objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) })
	objabi.AddVersionFlag() // -V
	objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) })
@@ -294,6 +294,10 @@ func Main(arch *sys.Arch, theArch Arch) {
		*flagBuildid = "go-openbsd"
	}

	if *flagHostBuildid != "" {
		addbuildinfo(ctxt)
	}

	// enable benchmarking
	var bench *benchmark.Metrics
	if len(*benchmarkFlag) != 0 {

@@ -257,6 +257,10 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) {
	if gs.lastStopStack != trace.NoStack {
		stk = ctx.Stack(viewerFrames(gs.lastStopStack))
	}
	var endStk int
	if stack != trace.NoStack {
		endStk = ctx.Stack(viewerFrames(stack))
	}
	// Check invariants.
	if gs.startRunningTime == 0 {
		panic("silently broken trace or generator invariant (startRunningTime != 0) not held")
@@ -270,6 +274,7 @@ func (gs *gState[R]) stop(ts trace.Time, stack trace.Stack, ctx *traceContext) {
		Dur:      ts.Sub(gs.startRunningTime),
		Resource: uint64(gs.executing),
		Stack:    stk,
		EndStack: endStk,
	})

	// Flush completed ranges.

@@ -32,28 +32,46 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error,
		if int64(n) > remain {
			n = int(remain)
		}
		m := n
		pos1 := pos
		n, err = syscall.Sendfile(dst, src, &pos1, n)
		if n > 0 {
			pos += int64(n)
			written += int64(n)
			remain -= int64(n)
			// (n, nil) indicates that sendfile(2) has transferred
			// the exact number of bytes we requested, or some unretryable
			// error have occurred with partial bytes sent. Either way, we
			// don't need to go through the following logic to check EINTR
			// or fell into dstFD.pd.waitWrite, just continue to send the
			// next chunk or break the loop.
			if n == m {
				continue
			} else if err != syscall.EAGAIN &&
				err != syscall.EINTR &&
				err != syscall.EBUSY {
				// Particularly, EPIPE. Errors like that would normally lead
				// the subsequent sendfile(2) call to (-1, EBADF).
				break
			}
		} else if err != syscall.EAGAIN && err != syscall.EINTR {
			// This includes syscall.ENOSYS (no kernel
			// support) and syscall.EINVAL (fd types which
			// don't implement sendfile), and other errors.
			// We should end the loop when there is no error
			// returned from sendfile(2) or it is not a retryable error.
			break
		}
		if err == syscall.EINTR {
			continue
		}
		// This includes syscall.ENOSYS (no kernel
		// support) and syscall.EINVAL (fd types which
		// don't implement sendfile), and other errors.
		// We should end the loop when there is no error
		// returned from sendfile(2) or it is not a retryable error.
		if err != syscall.EAGAIN {
			break
		}
		if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
			break
		}
	}
	handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
	if err == syscall.EAGAIN {
		err = nil
	}
	handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP)
	return
}

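A simplified sketch of the retry discipline the rewritten loop applies around sendfile(2) (not the runtime's poller-integrated code; waitWritable stands in for dstFD.pd.waitWrite): EINTR retries immediately, EAGAIN waits for writability and retries, and anything else (ENOSYS, EINVAL, EPIPE, ...) ends the loop.

//go:build linux

package sendfilesketch

import "syscall"

func sendAll(dst, src int, remain int64, waitWritable func() error) (written int64, err error) {
	const chunk = 1 << 20 // cap each sendfile(2) request
	for remain > 0 {
		n := int(min(remain, chunk))
		n, err = syscall.Sendfile(dst, src, nil, n)
		if n > 0 {
			written += int64(n)
			remain -= int64(n)
		} else if n == 0 && err == nil {
			break // EOF on src
		}
		switch err {
		case nil:
			// Progress without error: send the next chunk.
		case syscall.EINTR:
			// Interrupted by a signal: retry immediately.
		case syscall.EAGAIN:
			if err = waitWritable(); err != nil {
				return written, err
			}
		default:
			return written, err // unretryable
		}
	}
	return written, nil
}
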
@@ -50,6 +50,9 @@ func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handl
			break
		}
	}
	if err == syscall.EAGAIN {
		err = nil
	}
	handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
	return
}

@@ -61,6 +61,9 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error,
			break
		}
	}
	if err == syscall.EAGAIN {
		err = nil
	}
	handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
	return
}

@@ -53,6 +53,9 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
	if err != nil {
		return 0, err, false
	}
	if fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) != 0 {
		return 0, nil, false
	}

	remain = fi.Size()
}

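The mode check added to sendFile above is the gate for the fast path: only regular files should reach sendfile(2). A small sketch of the same guard in isolation (the demo uses /dev/null, so it is Unix-specific):

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// regularFile reports whether f may take the sendfile fast path; symlinks,
// devices, and other irregular files must fall back to the generic copy loop.
func regularFile(f *os.File) (bool, error) {
	fi, err := f.Stat()
	if err != nil {
		return false, err
	}
	return fi.Mode()&(fs.ModeSymlink|fs.ModeDevice|fs.ModeCharDevice|fs.ModeIrregular) == 0, nil
}

func main() {
	f, _ := os.Open("/dev/null")
	defer f.Close()
	ok, _ := regularFile(f)
	fmt.Println(ok) // false: a character device
}
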
src/net/sendfile_unix_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build unix

package net

import (
	"internal/testpty"
	"io"
	"os"
	"sync"
	"syscall"
	"testing"
)

// Issue 70763: test that we don't fail on sendfile from a tty.
func TestCopyFromTTY(t *testing.T) {
	pty, ttyName, err := testpty.Open()
	if err != nil {
		t.Skipf("skipping test because pty open failed: %v", err)
	}
	defer pty.Close()

	// Use syscall.Open so that the tty is blocking.
	ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0)
	if err != nil {
		t.Skipf("skipping test because tty open failed: %v", err)
	}
	defer syscall.Close(ttyFD)

	tty := os.NewFile(uintptr(ttyFD), "tty")
	defer tty.Close()

	ln := newLocalListener(t, "tcp")
	defer ln.Close()

	ch := make(chan bool)

	const data = "data\n"

	var wg sync.WaitGroup
	defer wg.Wait()

	wg.Add(1)
	go func() {
		defer wg.Done()
		conn, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		defer conn.Close()

		buf := make([]byte, len(data))
		if _, err := io.ReadFull(conn, buf); err != nil {
			t.Error(err)
		}

		ch <- true
	}()

	conn, err := Dial("tcp", ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	wg.Add(1)
	go func() {
		defer wg.Done()
		if _, err := pty.Write([]byte(data)); err != nil {
			t.Error(err)
		}
		<-ch
		if err := pty.Close(); err != nil {
			t.Error(err)
		}
	}()

	lr := io.LimitReader(tty, int64(len(data)))
	if _, err := io.Copy(conn, lr); err != nil {
		t.Error(err)
	}
}
src/os/copy_test.go (new file, 154 lines)
@@ -0,0 +1,154 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package os_test

import (
	"bytes"
	"errors"
	"io"
	"math/rand/v2"
	"net"
	"os"
	"runtime"
	"sync"
	"testing"

	"golang.org/x/net/nettest"
)

// Exercise sendfile/splice fast paths with a moderately large file.
//
// https://go.dev/issue/70000

func TestLargeCopyViaNetwork(t *testing.T) {
	const size = 10 * 1024 * 1024
	dir := t.TempDir()

	src, err := os.Create(dir + "/src")
	if err != nil {
		t.Fatal(err)
	}
	defer src.Close()
	if _, err := io.CopyN(src, newRandReader(), size); err != nil {
		t.Fatal(err)
	}
	if _, err := src.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	dst, err := os.Create(dir + "/dst")
	if err != nil {
		t.Fatal(err)
	}
	defer dst.Close()

	client, server := createSocketPair(t, "tcp")
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		if n, err := io.Copy(dst, server); n != size || err != nil {
			t.Errorf("copy to destination = %v, %v; want %v, nil", n, err, size)
		}
	}()
	go func() {
		defer wg.Done()
		defer client.Close()
		if n, err := io.Copy(client, src); n != size || err != nil {
			t.Errorf("copy from source = %v, %v; want %v, nil", n, err, size)
		}
	}()
	wg.Wait()

	if _, err := dst.Seek(0, 0); err != nil {
		t.Fatal(err)
	}
	if err := compareReaders(dst, io.LimitReader(newRandReader(), size)); err != nil {
		t.Fatal(err)
	}
}

func compareReaders(a, b io.Reader) error {
	bufa := make([]byte, 4096)
	bufb := make([]byte, 4096)
	for {
		na, erra := io.ReadFull(a, bufa)
		if erra != nil && erra != io.EOF {
			return erra
		}
		nb, errb := io.ReadFull(b, bufb)
		if errb != nil && errb != io.EOF {
			return errb
		}
		if !bytes.Equal(bufa[:na], bufb[:nb]) {
			return errors.New("contents mismatch")
		}
		if erra == io.EOF && errb == io.EOF {
			break
		}
	}
	return nil
}

type randReader struct {
	rand *rand.Rand
}

func newRandReader() *randReader {
	return &randReader{rand.New(rand.NewPCG(0, 0))}
}

func (r *randReader) Read(p []byte) (int, error) {
	var v uint64
	var n int
	for i := range p {
		if n == 0 {
			v = r.rand.Uint64()
			n = 8
		}
		p[i] = byte(v & 0xff)
		v >>= 8
		n--
	}
	return len(p), nil
}

func createSocketPair(t *testing.T, proto string) (client, server net.Conn) {
	t.Helper()
	if !nettest.TestableNetwork(proto) {
		t.Skipf("%s does not support %q", runtime.GOOS, proto)
	}

	ln, err := nettest.NewLocalListener(proto)
	if err != nil {
		t.Fatalf("NewLocalListener error: %v", err)
	}
	t.Cleanup(func() {
		if ln != nil {
			ln.Close()
		}
		if client != nil {
			client.Close()
		}
		if server != nil {
			server.Close()
		}
	})
	ch := make(chan struct{})
	go func() {
		var err error
		server, err = ln.Accept()
		if err != nil {
			t.Errorf("Accept new connection error: %v", err)
		}
		ch <- struct{}{}
	}()
	client, err = net.Dial(proto, ln.Addr().String())
	<-ch
	if err != nil {
		t.Fatalf("Dial new connection error: %v", err)
	}
	return client, server
}
@@ -8,12 +8,17 @@
// v5.3: pidfd_open syscall, clone3 syscall;
// v5.4: P_PIDFD idtype support for waitid syscall;
// v5.6: pidfd_getfd syscall.
//
// N.B. Alternative Linux implementations may not follow this ordering. e.g.,
// QEMU user mode 7.2 added pidfd_open, but CLONE_PIDFD was not added until
// 8.0.

package os

import (
	"errors"
	"internal/syscall/unix"
	"runtime"
	"sync"
	"syscall"
	"unsafe"
@@ -139,14 +144,21 @@ func pidfdWorks() bool {

var checkPidfdOnce = sync.OnceValue(checkPidfd)

// checkPidfd checks whether all required pidfd-related syscalls work.
// This consists of pidfd_open and pidfd_send_signal syscalls, and waitid
// syscall with idtype of P_PIDFD.
// checkPidfd checks whether all required pidfd-related syscalls work. This
// consists of pidfd_open and pidfd_send_signal syscalls, waitid syscall with
// idtype of P_PIDFD, and clone(CLONE_PIDFD).
//
// Reasons for non-working pidfd syscalls include an older kernel and an
// execution environment in which the above system calls are restricted by
// seccomp or a similar technology.
func checkPidfd() error {
	// In Android version < 12, pidfd-related system calls are not allowed
	// by seccomp and trigger the SIGSYS signal. See issue #69065.
	if runtime.GOOS == "android" {
		ignoreSIGSYS()
		defer restoreSIGSYS()
	}

	// Get a pidfd of the current process (opening of "/proc/self" won't
	// work for waitid).
	fd, err := unix.PidFDOpen(syscall.Getpid(), 0)
@@ -172,5 +184,27 @@ func checkPidfd() error {
		return NewSyscallError("pidfd_send_signal", err)
	}

	// Verify that clone(CLONE_PIDFD) works.
	//
	// This shouldn't be necessary since pidfd_open was added in Linux 5.3,
	// after CLONE_PIDFD in Linux 5.2, but some alternative Linux
	// implementations may not adhere to this ordering.
	if err := checkClonePidfd(); err != nil {
		return err
	}

	return nil
}

// Provided by syscall.
//
//go:linkname checkClonePidfd
func checkClonePidfd() error

// Provided by runtime.
//
//go:linkname ignoreSIGSYS
func ignoreSIGSYS()

//go:linkname restoreSIGSYS
func restoreSIGSYS()

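A userspace sketch of the same probe idea using golang.org/x/sys/unix (the patch itself uses internal/syscall/unix, and additionally verifies waitid with P_PIDFD and clone(CLONE_PIDFD), which this sketch does not): open a pidfd for our own PID and send signal 0 through it; failure on an old kernel or under restrictive seccomp means the pidfd fast path should be avoided.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func pidfdWorks() error {
	fd, err := unix.PidfdOpen(unix.Getpid(), 0)
	if err != nil {
		return fmt.Errorf("pidfd_open: %w", err)
	}
	defer unix.Close(fd)
	// Signal 0 performs error checking only; no signal is delivered.
	if err := unix.PidfdSendSignal(fd, 0, nil, 0); err != nil {
		return fmt.Errorf("pidfd_send_signal: %w", err)
	}
	return nil
}

func main() {
	if err := pidfdWorks(); err != nil {
		fmt.Println("pidfd unavailable:", err)
		return
	}
	fmt.Println("pidfd works")
}
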
@@ -14,15 +14,12 @@ import (
	"net"
	. "os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	"golang.org/x/net/nettest"
)

func TestCopyFileRange(t *testing.T) {
@@ -784,41 +781,3 @@ func testGetPollFDAndNetwork(t *testing.T, proto string) {
		t.Fatalf("server Control error: %v", err)
	}
}

func createSocketPair(t *testing.T, proto string) (client, server net.Conn) {
	t.Helper()
	if !nettest.TestableNetwork(proto) {
		t.Skipf("%s does not support %q", runtime.GOOS, proto)
	}

	ln, err := nettest.NewLocalListener(proto)
	if err != nil {
		t.Fatalf("NewLocalListener error: %v", err)
	}
	t.Cleanup(func() {
		if ln != nil {
			ln.Close()
		}
		if client != nil {
			client.Close()
		}
		if server != nil {
			server.Close()
		}
	})
	ch := make(chan struct{})
	go func() {
		var err error
		server, err = ln.Accept()
		if err != nil {
			t.Errorf("Accept new connection error: %v", err)
		}
		ch <- struct{}{}
	}()
	client, err = net.Dial(proto, ln.Addr().String())
	<-ch
	if err != nil {
		t.Fatalf("Dial new connection error: %v", err)
	}
	return client, server
}

@@ -31,10 +31,11 @@ x_cgo_getstackbound(uintptr bounds[2])
	pthread_attr_get_np(pthread_self(), &attr);
	pthread_attr_getstack(&attr, &addr, &size); // low address
#else
	// We don't know how to get the current stacks, so assume they are the
	// same as the default stack bounds.
	pthread_attr_getstacksize(&attr, &size);
	addr = __builtin_frame_address(0) + 4096 - size;
	// We don't know how to get the current stacks, leave it as
	// 0 and the caller will use an estimate based on the current
	// SP.
	addr = 0;
	size = 0;
#endif
	pthread_attr_destroy(&attr);

@@ -231,34 +231,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
	g0 := mp.g0

	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
	if mp.ncgo > 0 && !inBound {
		// ncgo > 0 indicates that this M was in Go further up the stack
		// (it called C and is now receiving a callback).
		//
		// !inBound indicates that we were called with SP outside the
		// expected system stack bounds (C changed the stack out from
		// under us between the cgocall and cgocallback?).
		//
		// It is not safe for the C call to change the stack out from
		// under us, so throw.

		// Note that this case isn't possible for signal == true, as
		// that is always passing a new M from needm.

		// Stack is bogus, but reset the bounds anyway so we can print.
		hi := g0.stack.hi
		lo := g0.stack.lo
		g0.stack.hi = sp + 1024
		g0.stack.lo = sp - 32*1024
		g0.stackguard0 = g0.stack.lo + stackGuard
		g0.stackguard1 = g0.stackguard0

		print("M ", mp.id, " procid ", mp.procid, " runtime: cgocallback with sp=", hex(sp), " out of bounds [", hex(lo), ", ", hex(hi), "]")
		print("\n")
		exit(2)
	}

	if !mp.isextra {
		// We allocated the stack for standard Ms. Don't replace the
		// stack bounds with estimated ones when we already initialized
@@ -266,26 +238,37 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
		return
	}

	// This M does not have Go further up the stack. However, it may have
	// previously called into Go, initializing the stack bounds. Between
	// that call returning and now the stack may have changed (perhaps the
	// C thread is running a coroutine library). We need to update the
	// stack bounds for this case.
	inBound := sp > g0.stack.lo && sp <= g0.stack.hi
	if inBound && mp.g0StackAccurate {
		// This M has called into Go before and has the stack bounds
		// initialized. We have the accurate stack bounds, and the SP
		// is in bounds. We expect it continues to run within the same
		// bounds.
		return
	}

	// We don't have an accurate stack bounds (either it never calls
	// into Go before, or we couldn't get the accurate bounds), or the
	// current SP is not within the previous bounds (the stack may have
	// changed between calls). We need to update the stack bounds.
	//
	// N.B. we need to update the stack bounds even if SP appears to
	// already be in bounds. Our "bounds" may actually be estimated dummy
	// bounds (below). The actual stack bounds could have shifted but still
	// have partial overlap with our dummy bounds. If we failed to update
	// in that case, we could find ourselves seemingly called near the
	// bottom of the stack bounds, where we quickly run out of space.
	// already be in bounds, if our bounds are estimated dummy bounds
	// (below). We may be in a different region within the same actual
	// stack bounds, but our estimates were not accurate. Or the actual
	// stack bounds could have shifted but still have partial overlap with
	// our dummy bounds. If we failed to update in that case, we could find
	// ourselves seemingly called near the bottom of the stack bounds, where
	// we quickly run out of space.

	// Set the stack bounds to match the current stack. If we don't
	// actually know how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB. If we
	// can get a more accurate stack bound from pthread, use that, provided
	// it actually contains SP..
	// it actually contains SP.
	g0.stack.hi = sp + 1024
	g0.stack.lo = sp - 32*1024
	mp.g0StackAccurate = false
	if !signal && _cgo_getstackbound != nil {
		// Don't adjust if called from the signal handler.
		// We are on the signal stack, not the pthread stack.
@@ -296,12 +279,16 @@ func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool) {
		asmcgocall(_cgo_getstackbound, unsafe.Pointer(&bounds))
		// getstackbound is an unsupported no-op on Windows.
		//
		// On Unix systems, if the API to get accurate stack bounds is
		// not available, it returns zeros.
		//
		// Don't use these bounds if they don't contain SP. Perhaps we
		// were called by something not using the standard thread
		// stack.
		if bounds[0] != 0 && sp > bounds[0] && sp <= bounds[1] {
			g0.stack.lo = bounds[0]
			g0.stack.hi = bounds[1]
			mp.g0StackAccurate = true
		}
	}
	g0.stackguard0 = g0.stack.lo + stackGuard
@@ -319,6 +306,8 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	}

	sp := gp.m.g0.sched.sp // system sp saved by cgocallback.
	oldStack := gp.m.g0.stack
	oldAccurate := gp.m.g0StackAccurate
	callbackUpdateSystemStack(gp.m, sp, false)

	// The call from C is on gp.m's g0 stack, so we must ensure
@@ -380,6 +369,12 @@ func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	reentersyscall(savedpc, uintptr(savedsp), uintptr(savedbp))

	gp.m.winsyscall = winsyscall

	// Restore the old g0 stack bounds
	gp.m.g0.stack = oldStack
	gp.m.g0.stackguard0 = oldStack.lo + stackGuard
	gp.m.g0.stackguard1 = gp.m.g0.stackguard0
	gp.m.g0StackAccurate = oldAccurate
}

func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {

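The dummy-bounds arithmetic above is simple enough to isolate. A tiny sketch of it (the runtime stores these on g0 and replaces them with pthread-derived bounds when those exist and actually contain SP):

package stacksketch

// estimateG0Stack assumes about 1 KiB of headroom above SP and 32 KiB below
// it when no accurate stack bounds are available.
func estimateG0Stack(sp uintptr) (lo, hi uintptr) {
	hi = sp + 1024
	lo = sp - 32*1024
	return lo, hi
}
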
@@ -208,6 +208,18 @@ func coroswitch_m(gp *g) {
	// directly if possible.
	setGNoWB(&mp.curg, gnext)
	setMNoWB(&gnext.m, mp)

	// Synchronize with any out-standing goroutine profile. We're about to start
	// executing, and an invariant of the profiler is that we tryRecordGoroutineProfile
	// whenever a goroutine is about to start running.
	//
	// N.B. We must do this before transitioning to _Grunning but after installing gnext
	// in curg, so that we have a valid curg for allocation (tryRecordGoroutineProfile
	// may allocate).
	if goroutineProfile.active {
		tryRecordGoroutineProfile(gnext, nil, osyield)
	}

	if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) {
		// The CAS failed: use casgstatus, which will take care of
		// coordinating with the garbage collector about the state change.

@@ -1886,3 +1886,30 @@ func (m *TraceMap) PutString(s string) (uint64, bool) {
func (m *TraceMap) Reset() {
	m.traceMap.reset()
}

func SetSpinInGCMarkDone(spin bool) {
	gcDebugMarkDone.spinAfterRaggedBarrier.Store(spin)
}

func GCMarkDoneRestarted() bool {
	// Only read this outside of the GC. If we're running during a GC, just report false.
	mp := acquirem()
	if gcphase != _GCoff {
		releasem(mp)
		return false
	}
	restarted := gcDebugMarkDone.restartedDueTo27993
	releasem(mp)
	return restarted
}

func GCMarkDoneResetRestartFlag() {
	mp := acquirem()
	for gcphase != _GCoff {
		releasem(mp)
		Gosched()
		mp = acquirem()
	}
	gcDebugMarkDone.restartedDueTo27993 = false
	releasem(mp)
}

@@ -6,6 +6,8 @@ package runtime_test

import (
	"fmt"
	"internal/testenv"
	"internal/weak"
	"math/bits"
	"math/rand"
	"os"
@@ -787,3 +789,78 @@ func TestMemoryLimitNoGCPercent(t *testing.T) {
func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}

func TestWeakToStrongMarkTermination(t *testing.T) {
	testenv.MustHaveParallelism(t)

	type T struct {
		a *int
		b int
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	w := make([]weak.Pointer[T], 2048)

	// Make sure there's no out-standing GC from a previous test.
	runtime.GC()

	// Create many objects with a weak pointers to them.
	for i := range w {
		x := new(T)
		x.a = new(int)
		w[i] = weak.Make(x)
	}

	// Reset the restart flag.
	runtime.GCMarkDoneResetRestartFlag()

	// Prevent mark termination from completing.
	runtime.SetSpinInGCMarkDone(true)

	// Start a GC, and wait a little bit to get something spinning in mark termination.
	// Simultaneously, fire off another goroutine to disable spinning. If everything's
	// working correctly, then weak.Strong will block, so we need to make sure something
	// prevents the GC from continuing to spin.
	done := make(chan struct{})
	go func() {
		runtime.GC()
		done <- struct{}{}
	}()
	go func() {
		time.Sleep(100 * time.Millisecond)

		// Let mark termination continue.
		runtime.SetSpinInGCMarkDone(false)
	}()
	time.Sleep(10 * time.Millisecond)

	// Perform many weak->strong conversions in the critical window.
	var wg sync.WaitGroup
	for _, wp := range w {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wp.Strong()
		}()
	}

	// Make sure the GC completes.
	<-done

	// Make sure all the weak->strong conversions finish.
	wg.Wait()

	// The bug is triggered if there's still mark work after gcMarkDone stops the world.
	//
	// This can manifest in one of two ways today:
	// - An exceedingly rare crash in mark termination.
	// - gcMarkDone restarts, as if issue #27993 is at play.
	//
	// Check for the latter. This is a fairly controlled environment, so #27993 is very
	// unlikely to happen (it's already rare to begin with) but we'll always _appear_ to
	// trigger the same bug if weak->strong conversions aren't properly coordinated with
	// mark termination.
	if runtime.GCMarkDoneRestarted() {
		t.Errorf("gcMarkDone restarted")
	}
}

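A sketch of the weak-pointer semantics the test depends on, using the same internal/weak API the test imports (this only compiles inside the Go tree; the public weak package in Go 1.24+ spells Strong as Value). weak.Make creates a pointer that does not keep x alive; Strong revives it into a real pointer, or returns nil once the GC has cleared the referent. The race the test provokes is a Strong call landing exactly during mark termination.

package runtime_test

import (
	"internal/weak"
	"runtime"
)

func demoWeak() {
	x := new(int)
	*x = 42
	wp := weak.Make(x)
	if p := wp.Strong(); p != nil {
		_ = *p // safe: p is a strong reference now
	}
	runtime.KeepAlive(x) // keep x alive at least this long
}
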
@@ -17,6 +17,7 @@ const (
	lockRankDefer
	lockRankSweepWaiters
	lockRankAssistQueue
	lockRankStrongFromWeakQueue
	lockRankSweep
	lockRankTestR
	lockRankTestW
@@ -84,64 +85,65 @@ const lockRankLeafRank lockRank = 1000

// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
	lockRankSysmon: "sysmon",
	lockRankScavenge: "scavenge",
	lockRankForcegc: "forcegc",
	lockRankDefer: "defer",
	lockRankSweepWaiters: "sweepWaiters",
	lockRankAssistQueue: "assistQueue",
	lockRankSweep: "sweep",
	lockRankTestR: "testR",
	lockRankTestW: "testW",
	lockRankTimerSend: "timerSend",
	lockRankAllocmW: "allocmW",
	lockRankExecW: "execW",
	lockRankCpuprof: "cpuprof",
	lockRankPollCache: "pollCache",
	lockRankPollDesc: "pollDesc",
	lockRankWakeableSleep: "wakeableSleep",
	lockRankHchan: "hchan",
	lockRankAllocmR: "allocmR",
	lockRankExecR: "execR",
	lockRankSched: "sched",
	lockRankAllg: "allg",
	lockRankAllp: "allp",
	lockRankNotifyList: "notifyList",
	lockRankSudog: "sudog",
	lockRankTimers: "timers",
	lockRankTimer: "timer",
	lockRankNetpollInit: "netpollInit",
	lockRankRoot: "root",
	lockRankItab: "itab",
	lockRankReflectOffs: "reflectOffs",
	lockRankUserArenaState: "userArenaState",
	lockRankTraceBuf: "traceBuf",
	lockRankTraceStrings: "traceStrings",
	lockRankFin: "fin",
	lockRankSpanSetSpine: "spanSetSpine",
	lockRankMspanSpecial: "mspanSpecial",
	lockRankTraceTypeTab: "traceTypeTab",
	lockRankGcBitsArenas: "gcBitsArenas",
	lockRankProfInsert: "profInsert",
	lockRankProfBlock: "profBlock",
	lockRankProfMemActive: "profMemActive",
	lockRankProfMemFuture: "profMemFuture",
	lockRankGscan: "gscan",
	lockRankStackpool: "stackpool",
	lockRankStackLarge: "stackLarge",
	lockRankHchanLeaf: "hchanLeaf",
	lockRankWbufSpans: "wbufSpans",
	lockRankMheap: "mheap",
	lockRankMheapSpecial: "mheapSpecial",
	lockRankGlobalAlloc: "globalAlloc",
	lockRankTrace: "trace",
	lockRankTraceStackTab: "traceStackTab",
	lockRankPanic: "panic",
	lockRankDeadlock: "deadlock",
	lockRankRaceFini: "raceFini",
	lockRankAllocmRInternal: "allocmRInternal",
	lockRankExecRInternal: "execRInternal",
	lockRankTestRInternal: "testRInternal",
	lockRankSysmon: "sysmon",
	lockRankScavenge: "scavenge",
	lockRankForcegc: "forcegc",
	lockRankDefer: "defer",
	lockRankSweepWaiters: "sweepWaiters",
	lockRankAssistQueue: "assistQueue",
	lockRankStrongFromWeakQueue: "strongFromWeakQueue",
	lockRankSweep: "sweep",
	lockRankTestR: "testR",
	lockRankTestW: "testW",
	lockRankTimerSend: "timerSend",
	lockRankAllocmW: "allocmW",
	lockRankExecW: "execW",
	lockRankCpuprof: "cpuprof",
	lockRankPollCache: "pollCache",
	lockRankPollDesc: "pollDesc",
	lockRankWakeableSleep: "wakeableSleep",
	lockRankHchan: "hchan",
	lockRankAllocmR: "allocmR",
	lockRankExecR: "execR",
	lockRankSched: "sched",
	lockRankAllg: "allg",
	lockRankAllp: "allp",
	lockRankNotifyList: "notifyList",
	lockRankSudog: "sudog",
	lockRankTimers: "timers",
	lockRankTimer: "timer",
	lockRankNetpollInit: "netpollInit",
	lockRankRoot: "root",
	lockRankItab: "itab",
	lockRankReflectOffs: "reflectOffs",
	lockRankUserArenaState: "userArenaState",
	lockRankTraceBuf: "traceBuf",
	lockRankTraceStrings: "traceStrings",
	lockRankFin: "fin",
	lockRankSpanSetSpine: "spanSetSpine",
	lockRankMspanSpecial: "mspanSpecial",
	lockRankTraceTypeTab: "traceTypeTab",
	lockRankGcBitsArenas: "gcBitsArenas",
	lockRankProfInsert: "profInsert",
	lockRankProfBlock: "profBlock",
	lockRankProfMemActive: "profMemActive",
	lockRankProfMemFuture: "profMemFuture",
	lockRankGscan: "gscan",
	lockRankStackpool: "stackpool",
	lockRankStackLarge: "stackLarge",
	lockRankHchanLeaf: "hchanLeaf",
	lockRankWbufSpans: "wbufSpans",
	lockRankMheap: "mheap",
	lockRankMheapSpecial: "mheapSpecial",
	lockRankGlobalAlloc: "globalAlloc",
	lockRankTrace: "trace",
	lockRankTraceStackTab: "traceStackTab",
	lockRankPanic: "panic",
	lockRankDeadlock: "deadlock",
	lockRankRaceFini: "raceFini",
	lockRankAllocmRInternal: "allocmRInternal",
	lockRankExecRInternal: "execRInternal",
	lockRankTestRInternal: "testRInternal",
}

func (rank lockRank) String() string {
@@ -163,62 +165,63 @@ func (rank lockRank) String() string {
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
	lockRankSysmon: {},
	lockRankScavenge: {lockRankSysmon},
	lockRankForcegc: {lockRankSysmon},
	lockRankDefer: {},
	lockRankSweepWaiters: {},
	lockRankAssistQueue: {},
	lockRankSweep: {},
	lockRankTestR: {},
	lockRankTestW: {},
	lockRankTimerSend: {},
	lockRankAllocmW: {},
	lockRankExecW: {},
	lockRankCpuprof: {},
	lockRankPollCache: {},
	lockRankPollDesc: {},
	lockRankWakeableSleep: {},
	lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan},
	lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
	lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
	lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR},
	lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
	lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
	lockRankNotifyList: {},
	lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList},
	lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
	lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
	lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer},
	lockRankRoot: {},
	lockRankItab: {},
	lockRankReflectOffs: {lockRankItab},
	lockRankUserArenaState: {},
	lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
	lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
	lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
	lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
	lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
	lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
	lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
	lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
	lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
	lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
	lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
	lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
	lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
	lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
	lockRankPanic: {},
	lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
	lockRankRaceFini: {lockRankPanic},
	lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR},
	lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR},
	lockRankTestRInternal: {lockRankTestR, lockRankTestW},
	lockRankSysmon: {},
	lockRankScavenge: {lockRankSysmon},
	lockRankForcegc: {lockRankSysmon},
	lockRankDefer: {},
	lockRankSweepWaiters: {},
	lockRankAssistQueue: {},
	lockRankStrongFromWeakQueue: {},
	lockRankSweep: {},
	lockRankTestR: {},
	lockRankTestW: {},
	lockRankTimerSend: {},
	lockRankAllocmW: {},
	lockRankExecW: {},
	lockRankCpuprof: {},
	lockRankPollCache: {},
	lockRankPollDesc: {},
	lockRankWakeableSleep: {},
	lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan},
	lockRankAllocmR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
	lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan},
	lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR},
	lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
	lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched},
	lockRankNotifyList: {},
	lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList},
	lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
	lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers},
	lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankTimers, lockRankTimer},
	lockRankRoot: {},
	lockRankItab: {},
	lockRankReflectOffs: {lockRankItab},
	lockRankUserArenaState: {},
	lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
	lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
	lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankTraceTypeTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial},
	lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings},
	lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
	lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture},
	lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
	lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
|
||||
lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf},
|
||||
lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan},
|
||||
lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
|
||||
lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
|
||||
lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial},
|
||||
lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
|
||||
lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollCache, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankNotifyList, lockRankSudog, lockRankTimers, lockRankTimer, lockRankNetpollInit, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace},
|
||||
lockRankPanic: {},
|
||||
lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
|
||||
lockRankRaceFini: {lockRankPanic},
|
||||
lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankAllocmR},
|
||||
lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankStrongFromWeakQueue, lockRankSweep, lockRankTestR, lockRankTimerSend, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankHchan, lockRankExecR},
|
||||
lockRankTestRInternal: {lockRankTestR, lockRankTestW},
|
||||
}
|
||||
|
||||
@@ -190,6 +190,7 @@ func gcinit() {
|
||||
work.markDoneSema = 1
|
||||
lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
|
||||
lockInit(&work.assistQueue.lock, lockRankAssistQueue)
|
||||
lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
|
||||
lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
|
||||
}
|
||||
|
||||
@@ -418,6 +419,26 @@ type workType struct {
|
||||
list gList
|
||||
}
|
||||
|
||||
// strongFromWeak controls how the GC interacts with weak->strong
|
||||
// pointer conversions.
|
||||
strongFromWeak struct {
|
||||
// block is a flag set during mark termination that prevents
|
||||
// new weak->strong conversions from executing by blocking the
|
||||
// goroutine and enqueuing it onto q.
|
||||
//
|
||||
// Mutated only by one goroutine at a time in gcMarkDone,
|
||||
// with globally-synchronizing events like forEachP and
|
||||
// stopTheWorld.
|
||||
block bool
|
||||
|
||||
// q is a queue of goroutines that attempted to perform a
|
||||
// weak->strong conversion during mark termination.
|
||||
//
|
||||
// Protected by lock.
|
||||
lock mutex
|
||||
q gQueue
|
||||
}
|
||||
|
||||
// cycles is the number of completed GC cycles, where a GC
|
||||
// cycle is sweep termination, mark, mark termination, and
|
||||
// sweep. This differs from memstats.numgc, which is
|
||||
@@ -800,6 +821,19 @@ func gcStart(trigger gcTrigger) {
|
||||
// This is protected by markDoneSema.
|
||||
var gcMarkDoneFlushed uint32
|
||||
|
||||
// gcDebugMarkDone contains fields used to debug/test mark termination.
|
||||
var gcDebugMarkDone struct {
|
||||
// spinAfterRaggedBarrier forces gcMarkDone to spin after it executes
|
||||
// the ragged barrier.
|
||||
spinAfterRaggedBarrier atomic.Bool
|
||||
|
||||
// restartedDueTo27993 indicates that we restarted mark termination
|
||||
// due to the bug described in issue #27993.
|
||||
//
|
||||
// Protected by worldsema.
|
||||
restartedDueTo27993 bool
|
||||
}
|
||||
|
||||
// gcMarkDone transitions the GC from mark to mark termination if all
|
||||
// reachable objects have been marked (that is, there are no grey
|
||||
// objects and can be no more in the future). Otherwise, it flushes
|
||||
@@ -842,6 +876,10 @@ top:
|
||||
// stop the world later, so acquire worldsema now.
|
||||
semacquire(&worldsema)
|
||||
|
||||
// Prevent weak->strong conversions from generating additional
|
||||
// GC work. forEachP will guarantee that it is observed globally.
|
||||
work.strongFromWeak.block = true
|
||||
|
||||
// Flush all local buffers and collect flushedWork flags.
|
||||
gcMarkDoneFlushed = 0
|
||||
forEachP(waitReasonGCMarkTermination, func(pp *p) {
|
||||
@@ -872,6 +910,10 @@ top:
|
||||
goto top
|
||||
}
|
||||
|
||||
// For debugging/testing.
|
||||
for gcDebugMarkDone.spinAfterRaggedBarrier.Load() {
|
||||
}
|
||||
|
||||
// There was no global work, no local work, and no Ps
|
||||
// communicated work since we took markDoneSema. Therefore
|
||||
// there are no grey objects and no more objects can be
|
||||
@@ -910,6 +952,8 @@ top:
|
||||
}
|
||||
})
|
||||
if restart {
|
||||
gcDebugMarkDone.restartedDueTo27993 = true
|
||||
|
||||
getg().m.preemptoff = ""
|
||||
systemstack(func() {
|
||||
// Accumulate the time we were stopped before we had to start again.
|
||||
@@ -936,6 +980,11 @@ top:
|
||||
// start the world again.
|
||||
gcWakeAllAssists()
|
||||
|
||||
// Wake all blocked weak->strong conversions. These will run
|
||||
// when we start the world again.
|
||||
work.strongFromWeak.block = false
|
||||
gcWakeAllStrongFromWeak()
|
||||
|
||||
// Likewise, release the transition lock. Blocked
|
||||
// workers and assists will run when we start the
|
||||
// world again.
|
||||
|
||||
@@ -2049,8 +2049,19 @@ func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer
|
||||
func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
|
||||
handle := (*atomic.Uintptr)(u)
|
||||
|
||||
// Prevent preemption. We want to make sure that another GC cycle can't start.
|
||||
// Prevent preemption. We want to make sure that another GC cycle can't start
|
||||
// and that work.strongFromWeak.block can't change out from under us.
|
||||
mp := acquirem()
|
||||
|
||||
// Yield to the GC if necessary.
|
||||
if work.strongFromWeak.block {
|
||||
releasem(mp)
|
||||
|
||||
// Try to park and wait for mark termination.
|
||||
// N.B. gcParkStrongFromWeak calls acquirem before returning.
|
||||
mp = gcParkStrongFromWeak()
|
||||
}
|
||||
|
||||
p := handle.Load()
|
||||
if p == 0 {
|
||||
releasem(mp)
|
||||
@@ -2092,6 +2103,41 @@ func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer {
|
||||
return ptr
|
||||
}
|
||||
|
||||
// gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.
|
||||
func gcParkStrongFromWeak() *m {
|
||||
// Prevent preemption as we check strongFromWeak, so it can't change out from under us.
|
||||
mp := acquirem()
|
||||
|
||||
for work.strongFromWeak.block {
|
||||
lock(&work.strongFromWeak.lock)
|
||||
releasem(mp) // N.B. Holding the lock prevents preemption.
|
||||
|
||||
// Queue ourselves up.
|
||||
work.strongFromWeak.q.pushBack(getg())
|
||||
|
||||
// Park.
|
||||
goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
|
||||
|
||||
// Re-acquire the current M since we're going to check the condition again.
|
||||
mp = acquirem()
|
||||
|
||||
// Re-check condition. We may have awoken in the next GC's mark termination phase.
|
||||
}
|
||||
return mp
|
||||
}
|
||||
|
||||
// gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
|
||||
// conversions. This is used at the end of a GC cycle.
|
||||
//
|
||||
// work.strongFromWeak.block must be false to prevent woken goroutines
|
||||
// from immediately going back to sleep.
|
||||
func gcWakeAllStrongFromWeak() {
|
||||
lock(&work.strongFromWeak.lock)
|
||||
list := work.strongFromWeak.q.popList()
|
||||
injectglist(&list)
|
||||
unlock(&work.strongFromWeak.lock)
|
||||
}
|
||||
|
||||
// Retrieves or creates a weak pointer handle for the object p.
|
||||
func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
|
||||
// First try to retrieve without allocating.
|
||||
@@ -2126,8 +2172,14 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
|
||||
|
||||
// Keep p alive for the duration of the function to ensure
|
||||
// that it cannot die while we're trying to do this.
|
||||
//
|
||||
// Same for handle, which is only stored in the special.
|
||||
// There's a window where it might die if we don't keep it
|
||||
// alive explicitly. Returning it here is probably good enough,
|
||||
// but let's be defensive and explicit. See #70455.
|
||||
KeepAlive(p)
|
||||
return s.handle
|
||||
KeepAlive(handle)
|
||||
return handle
|
||||
}
|
||||
|
||||
// There was an existing handle. Free the special
|
||||
@@ -2147,7 +2199,10 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr {
|
||||
|
||||
// Keep p alive for the duration of the function to ensure
|
||||
// that it cannot die while we're trying to do this.
|
||||
//
|
||||
// Same for handle, just to be defensive.
|
||||
KeepAlive(p)
|
||||
KeepAlive(handle)
|
||||
return handle
|
||||
}
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ NONE < defer;
|
||||
NONE <
|
||||
sweepWaiters,
|
||||
assistQueue,
|
||||
strongFromWeakQueue,
|
||||
sweep;
|
||||
|
||||
# Test only
|
||||
@@ -66,6 +67,7 @@ assistQueue,
|
||||
hchan,
|
||||
pollDesc, # pollDesc can interact with timers, which can lock sched.
|
||||
scavenge,
|
||||
strongFromWeakQueue,
|
||||
sweep,
|
||||
sweepWaiters,
|
||||
testR,
|
||||
|
||||
@@ -1136,11 +1136,12 @@ func expandFrames(p []BlockProfileRecord) {
|
||||
for i := range p {
|
||||
cf := CallersFrames(p[i].Stack())
|
||||
j := 0
|
||||
for ; j < len(expandedStack); j++ {
|
||||
for j < len(expandedStack) {
|
||||
f, more := cf.Next()
|
||||
// f.PC is a "call PC", but later consumers will expect
|
||||
// "return PCs"
|
||||
expandedStack[j] = f.PC + 1
|
||||
j++
|
||||
if !more {
|
||||
break
|
||||
}
|
||||
@@ -1270,7 +1271,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok
|
||||
// of calling ThreadCreateProfile directly.
|
||||
func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
|
||||
return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
|
||||
copy(p[0].Stack0[:], r.Stack)
|
||||
i := copy(p[0].Stack0[:], r.Stack)
|
||||
clear(p[0].Stack0[i:])
|
||||
p = p[1:]
|
||||
})
|
||||
}
|
||||
@@ -1649,7 +1651,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
|
||||
return
|
||||
}
|
||||
for i, mr := range records[0:n] {
|
||||
copy(p[i].Stack0[:], mr.Stack)
|
||||
l := copy(p[i].Stack0[:], mr.Stack)
|
||||
clear(p[i].Stack0[l:])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -879,8 +879,9 @@ func runPerThreadSyscall() {
|
||||
}
|
||||
|
||||
const (
|
||||
_SI_USER = 0
|
||||
_SI_TKILL = -6
|
||||
_SI_USER = 0
|
||||
_SI_TKILL = -6
|
||||
_SYS_SECCOMP = 1
|
||||
)
|
||||
|
||||
// sigFromUser reports whether the signal was sent because of a call
|
||||
@@ -892,6 +893,14 @@ func (c *sigctxt) sigFromUser() bool {
|
||||
return code == _SI_USER || code == _SI_TKILL
|
||||
}
|
||||
|
||||
// sigFromSeccomp reports whether the signal was sent from seccomp.
|
||||
//
|
||||
//go:nosplit
|
||||
func (c *sigctxt) sigFromSeccomp() bool {
|
||||
code := int32(c.sigcode())
|
||||
return code == _SYS_SECCOMP
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) {
|
||||
r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)
|
||||
|
||||
@@ -13,3 +13,10 @@ package runtime
|
||||
func (c *sigctxt) sigFromUser() bool {
|
||||
return c.sigcode() == _SI_USER
|
||||
}
|
||||
|
||||
// sigFromSeccomp reports whether the signal was sent from seccomp.
|
||||
//
|
||||
//go:nosplit
|
||||
func (c *sigctxt) sigFromSeccomp() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -145,7 +145,7 @@ func TestMemoryProfiler(t *testing.T) {
|
||||
}
|
||||
t.Logf("Profile = %v", p)
|
||||
|
||||
stks := stacks(p)
|
||||
stks := profileStacks(p)
|
||||
for _, test := range tests {
|
||||
if !containsStack(stks, test.stk) {
|
||||
t.Fatalf("No matching stack entry for %q\n\nProfile:\n%v\n", test.stk, p)
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"internal/syscall/unix"
|
||||
"internal/testenv"
|
||||
"io"
|
||||
"iter"
|
||||
"math"
|
||||
"math/big"
|
||||
"os"
|
||||
@@ -981,7 +982,7 @@ func TestBlockProfile(t *testing.T) {
|
||||
t.Fatalf("invalid profile: %v", err)
|
||||
}
|
||||
|
||||
stks := stacks(p)
|
||||
stks := profileStacks(p)
|
||||
for _, test := range tests {
|
||||
if !containsStack(stks, test.stk) {
|
||||
t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
|
||||
@@ -991,7 +992,7 @@ func TestBlockProfile(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func stacks(p *profile.Profile) (res [][]string) {
|
||||
func profileStacks(p *profile.Profile) (res [][]string) {
|
||||
for _, s := range p.Sample {
|
||||
var stk []string
|
||||
for _, l := range s.Location {
|
||||
@@ -1004,6 +1005,22 @@ func stacks(p *profile.Profile) (res [][]string) {
|
||||
return res
|
||||
}
|
||||
|
||||
func blockRecordStacks(records []runtime.BlockProfileRecord) (res [][]string) {
|
||||
for _, record := range records {
|
||||
frames := runtime.CallersFrames(record.Stack())
|
||||
var stk []string
|
||||
for {
|
||||
frame, more := frames.Next()
|
||||
stk = append(stk, frame.Function)
|
||||
if !more {
|
||||
break
|
||||
}
|
||||
}
|
||||
res = append(res, stk)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func containsStack(got [][]string, want []string) bool {
|
||||
for _, stk := range got {
|
||||
if len(stk) < len(want) {
|
||||
@@ -1288,7 +1305,7 @@ func TestMutexProfile(t *testing.T) {
|
||||
t.Fatalf("invalid profile: %v", err)
|
||||
}
|
||||
|
||||
stks := stacks(p)
|
||||
stks := profileStacks(p)
|
||||
for _, want := range [][]string{
|
||||
{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1"},
|
||||
} {
|
||||
@@ -1328,6 +1345,28 @@ func TestMutexProfile(t *testing.T) {
|
||||
t.Fatalf("profile samples total %v, want within range [%v, %v] (target: %v)", d, lo, hi, N*D)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("records", func(t *testing.T) {
|
||||
// Record a mutex profile using the structured record API.
|
||||
var records []runtime.BlockProfileRecord
|
||||
for {
|
||||
n, ok := runtime.MutexProfile(records)
|
||||
if ok {
|
||||
records = records[:n]
|
||||
break
|
||||
}
|
||||
records = make([]runtime.BlockProfileRecord, n*2)
|
||||
}
|
||||
|
||||
// Check that we see the same stack trace as the proto profile. For
|
||||
// historical reason we expect a runtime.goexit root frame here that is
|
||||
// omitted in the proto profile.
|
||||
stks := blockRecordStacks(records)
|
||||
want := []string{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutexN.func1", "runtime.goexit"}
|
||||
if !containsStack(stks, want) {
|
||||
t.Errorf("No matching stack entry for %+v", want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMutexProfileRateAdjust(t *testing.T) {
|
||||
@@ -1754,6 +1793,50 @@ func TestGoroutineProfileConcurrency(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Regression test for #69998.
|
||||
func TestGoroutineProfileCoro(t *testing.T) {
|
||||
testenv.MustHaveParallelism(t)
|
||||
|
||||
goroutineProf := Lookup("goroutine")
|
||||
|
||||
// Set up a goroutine to just create and run coroutine goroutines all day.
|
||||
iterFunc := func() {
|
||||
p, stop := iter.Pull2(
|
||||
func(yield func(int, int) bool) {
|
||||
for i := 0; i < 10000; i++ {
|
||||
if !yield(i, i) {
|
||||
return
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
defer stop()
|
||||
for {
|
||||
_, _, ok := p()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
done := make(chan struct{})
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for {
|
||||
iterFunc()
|
||||
select {
|
||||
case <-done:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Take a goroutine profile. If the bug in #69998 is present, this will crash
|
||||
// with high probability. We don't care about the output for this bug.
|
||||
goroutineProf.WriteTo(io.Discard, 1)
|
||||
}
|
||||
|
||||
func BenchmarkGoroutine(b *testing.B) {
|
||||
withIdle := func(n int, fn func(b *testing.B)) func(b *testing.B) {
|
||||
return func(b *testing.B) {
|
||||
@@ -2441,16 +2524,7 @@ func TestTimeVDSO(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProfilerStackDepth(t *testing.T) {
|
||||
// Disable sampling, otherwise it's difficult to assert anything.
|
||||
oldMemRate := runtime.MemProfileRate
|
||||
runtime.MemProfileRate = 1
|
||||
runtime.SetBlockProfileRate(1)
|
||||
oldMutexRate := runtime.SetMutexProfileFraction(1)
|
||||
t.Cleanup(func() {
|
||||
runtime.MemProfileRate = oldMemRate
|
||||
runtime.SetBlockProfileRate(0)
|
||||
runtime.SetMutexProfileFraction(oldMutexRate)
|
||||
})
|
||||
t.Cleanup(disableSampling())
|
||||
|
||||
const depth = 128
|
||||
go produceProfileEvents(t, depth)
|
||||
@@ -2478,7 +2552,7 @@ func TestProfilerStackDepth(t *testing.T) {
|
||||
}
|
||||
t.Logf("Profile = %v", p)
|
||||
|
||||
stks := stacks(p)
|
||||
stks := profileStacks(p)
|
||||
var stk []string
|
||||
for _, s := range stks {
|
||||
if hasPrefix(s, test.prefix) {
|
||||
@@ -2742,3 +2816,84 @@ runtime/pprof.inlineA`,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileRecordNullPadding(t *testing.T) {
|
||||
// Produce events for the different profile types.
|
||||
t.Cleanup(disableSampling())
|
||||
memSink = make([]byte, 1) // MemProfile
|
||||
<-time.After(time.Millisecond) // BlockProfile
|
||||
blockMutex(t) // MutexProfile
|
||||
runtime.GC()
|
||||
|
||||
// Test that all profile records are null padded.
|
||||
testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
|
||||
testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
|
||||
testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
|
||||
testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
|
||||
return runtime.MemProfile(p, true)
|
||||
})
|
||||
testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
|
||||
return runtime.MemProfile(p, false)
|
||||
})
|
||||
// Not testing ThreadCreateProfile because it is broken, see issue 6104.
|
||||
}
|
||||
|
||||
func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
|
||||
stack0 := func(sr *T) *[32]uintptr {
|
||||
switch t := any(sr).(type) {
|
||||
case *runtime.StackRecord:
|
||||
return &t.Stack0
|
||||
case *runtime.MemProfileRecord:
|
||||
return &t.Stack0
|
||||
case *runtime.BlockProfileRecord:
|
||||
return &t.Stack0
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected type %T", sr))
|
||||
}
|
||||
}
|
||||
|
||||
t.Run(name, func(t *testing.T) {
|
||||
var p []T
|
||||
for {
|
||||
n, ok := fn(p)
|
||||
if ok {
|
||||
p = p[:n]
|
||||
break
|
||||
}
|
||||
p = make([]T, n*2)
|
||||
for i := range p {
|
||||
s0 := stack0(&p[i])
|
||||
for j := range s0 {
|
||||
// Poison the Stack0 array to identify lack of zero padding
|
||||
s0[j] = ^uintptr(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(p) == 0 {
|
||||
t.Fatal("no records found")
|
||||
}
|
||||
|
||||
for _, sr := range p {
|
||||
for i, v := range stack0(&sr) {
|
||||
if v == ^uintptr(0) {
|
||||
t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// disableSampling configures the profilers to capture all events, otherwise
|
||||
// it's difficult to assert anything.
|
||||
func disableSampling() func() {
|
||||
oldMemRate := runtime.MemProfileRate
|
||||
runtime.MemProfileRate = 1
|
||||
runtime.SetBlockProfileRate(1)
|
||||
oldMutexRate := runtime.SetMutexProfileFraction(1)
|
||||
return func() {
|
||||
runtime.MemProfileRate = oldMemRate
|
||||
runtime.SetBlockProfileRate(0)
|
||||
runtime.SetMutexProfileFraction(oldMutexRate)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2539,6 +2539,7 @@ func dropm() {
|
||||
g0.stack.lo = 0
|
||||
g0.stackguard0 = 0
|
||||
g0.stackguard1 = 0
|
||||
mp.g0StackAccurate = false
|
||||
|
||||
putExtraM(mp)
|
||||
|
||||
|
||||
@@ -575,15 +575,15 @@ func TestGdbAutotmpTypes(t *testing.T) {
|
||||
|
||||
// Check that the backtrace matches the source code.
|
||||
types := []string{
|
||||
"[]main.astruct;",
|
||||
"bucket<string,main.astruct>;",
|
||||
"hash<string,main.astruct>;",
|
||||
"main.astruct;",
|
||||
"hash<string,main.astruct> * map[string]main.astruct;",
|
||||
"[]main.astruct",
|
||||
"bucket<string,main.astruct>",
|
||||
"hash<string,main.astruct>",
|
||||
"main.astruct",
|
||||
"hash<string,main.astruct> * map[string]main.astruct",
|
||||
}
|
||||
for _, name := range types {
|
||||
if !strings.Contains(sgot, name) {
|
||||
t.Fatalf("could not find %s in 'info typrs astruct' output", name)
|
||||
t.Fatalf("could not find %q in 'info typrs astruct' output", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -556,47 +556,48 @@ type m struct {
|
||||
_ uint32 // align next field to 8 bytes
|
||||
|
||||
// Fields not known to debuggers.
|
||||
procid uint64 // for debuggers, but offset not hard-coded
|
||||
gsignal *g // signal-handling g
|
||||
goSigStack gsignalStack // Go-allocated signal handling stack
|
||||
sigmask sigset // storage for saved signal mask
|
||||
tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
|
||||
mstartfn func()
|
||||
curg *g // current running goroutine
|
||||
caughtsig guintptr // goroutine running during fatal signal
|
||||
p puintptr // attached p for executing go code (nil if not executing go code)
|
||||
nextp puintptr
|
||||
oldp puintptr // the p that was attached before executing a syscall
|
||||
id int64
|
||||
mallocing int32
|
||||
throwing throwType
|
||||
preemptoff string // if != "", keep curg running on this m
|
||||
locks int32
|
||||
dying int32
|
||||
profilehz int32
|
||||
spinning bool // m is out of work and is actively looking for work
|
||||
blocked bool // m is blocked on a note
|
||||
newSigstack bool // minit on C thread called sigaltstack
|
||||
printlock int8
|
||||
incgo bool // m is executing a cgo call
|
||||
isextra bool // m is an extra m
|
||||
isExtraInC bool // m is an extra m that is not executing Go code
|
||||
isExtraInSig bool // m is an extra m in a signal handler
|
||||
freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
|
||||
needextram bool
|
||||
traceback uint8
|
||||
ncgocall uint64 // number of cgo calls in total
|
||||
ncgo int32 // number of cgo calls currently in progress
|
||||
cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily
|
||||
cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
|
||||
park note
|
||||
alllink *m // on allm
|
||||
schedlink muintptr
|
||||
lockedg guintptr
|
||||
createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
|
||||
lockedExt uint32 // tracking for external LockOSThread
|
||||
lockedInt uint32 // tracking for internal lockOSThread
|
||||
nextwaitm muintptr // next m waiting for lock
|
||||
procid uint64 // for debuggers, but offset not hard-coded
|
||||
gsignal *g // signal-handling g
|
||||
goSigStack gsignalStack // Go-allocated signal handling stack
|
||||
sigmask sigset // storage for saved signal mask
|
||||
tls [tlsSlots]uintptr // thread-local storage (for x86 extern register)
|
||||
mstartfn func()
|
||||
curg *g // current running goroutine
|
||||
caughtsig guintptr // goroutine running during fatal signal
|
||||
p puintptr // attached p for executing go code (nil if not executing go code)
|
||||
nextp puintptr
|
||||
oldp puintptr // the p that was attached before executing a syscall
|
||||
id int64
|
||||
mallocing int32
|
||||
throwing throwType
|
||||
preemptoff string // if != "", keep curg running on this m
|
||||
locks int32
|
||||
dying int32
|
||||
profilehz int32
|
||||
spinning bool // m is out of work and is actively looking for work
|
||||
blocked bool // m is blocked on a note
|
||||
newSigstack bool // minit on C thread called sigaltstack
|
||||
printlock int8
|
||||
incgo bool // m is executing a cgo call
|
||||
isextra bool // m is an extra m
|
||||
isExtraInC bool // m is an extra m that is not executing Go code
|
||||
isExtraInSig bool // m is an extra m in a signal handler
|
||||
freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
|
||||
needextram bool
|
||||
g0StackAccurate bool // whether the g0 stack has accurate bounds
|
||||
traceback uint8
|
||||
ncgocall uint64 // number of cgo calls in total
|
||||
ncgo int32 // number of cgo calls currently in progress
|
||||
cgoCallersUse atomic.Uint32 // if non-zero, cgoCallers in use temporarily
|
||||
cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
|
||||
park note
|
||||
alllink *m // on allm
|
||||
schedlink muintptr
|
||||
lockedg guintptr
|
||||
createstack [32]uintptr // stack that created this thread, it's used for StackRecord.Stack0, so it must align with it.
|
||||
lockedExt uint32 // tracking for external LockOSThread
|
||||
lockedInt uint32 // tracking for internal lockOSThread
|
||||
nextwaitm muintptr // next m waiting for lock
|
||||
|
||||
mLockProfile mLockProfile // fields relating to runtime.lock contention
|
||||
profStack []uintptr // used for memory/block/mutex stack traces
|
||||
@@ -1095,6 +1096,7 @@ const (
|
||||
waitReasonTraceProcStatus // "trace proc status"
|
||||
waitReasonPageTraceFlush // "page trace flush"
|
||||
waitReasonCoroutine // "coroutine"
|
||||
waitReasonGCWeakToStrongWait // "GC weak to strong wait"
|
||||
)
|
||||
|
||||
var waitReasonStrings = [...]string{
|
||||
@@ -1135,6 +1137,7 @@ var waitReasonStrings = [...]string{
|
||||
waitReasonTraceProcStatus: "trace proc status",
|
||||
waitReasonPageTraceFlush: "page trace flush",
|
||||
waitReasonCoroutine: "coroutine",
|
||||
waitReasonGCWeakToStrongWait: "GC weak to strong wait",
|
||||
}
|
||||
|
||||
func (w waitReason) String() string {
|
||||
|
||||
@@ -605,6 +605,19 @@ var crashing atomic.Int32
|
||||
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
|
||||
var testSigusr1 func(gp *g) bool
|
||||
|
||||
// sigsysIgnored is non-zero if we are currently ignoring SIGSYS. See issue #69065.
|
||||
var sigsysIgnored uint32
|
||||
|
||||
//go:linkname ignoreSIGSYS os.ignoreSIGSYS
|
||||
func ignoreSIGSYS() {
|
||||
atomic.Store(&sigsysIgnored, 1)
|
||||
}
|
||||
|
||||
//go:linkname restoreSIGSYS os.restoreSIGSYS
|
||||
func restoreSIGSYS() {
|
||||
atomic.Store(&sigsysIgnored, 0)
|
||||
}
|
||||
|
||||
// sighandler is invoked when a signal occurs. The global g will be
|
||||
// set to a gsignal goroutine and we will be running on the alternate
|
||||
// signal stack. The parameter gp will be the value of the global g
|
||||
@@ -715,6 +728,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
|
||||
return
|
||||
}
|
||||
|
||||
if sig == _SIGSYS && c.sigFromSeccomp() && atomic.Load(&sigsysIgnored) != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if flags&_SigKill != 0 {
|
||||
dieFromSignal(sig)
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ const (
|
||||
// to each stack below the usual guard area for OS-specific
|
||||
// purposes like signal handling. Used on Windows, Plan 9,
|
||||
// and iOS because they do not use a separate stack.
|
||||
stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
|
||||
stackSystem = goos.IsWindows*4096 + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
|
||||
|
||||
// The minimum size of stack used by Go code
|
||||
stackMin = 2048
|
||||
@@ -1330,7 +1330,7 @@ func morestackc() {
|
||||
}
|
||||
|
||||
// startingStackSize is the amount of stack that new goroutines start with.
|
||||
// It is a power of 2, and between _FixedStack and maxstacksize, inclusive.
|
||||
// It is a power of 2, and between fixedStack and maxstacksize, inclusive.
|
||||
// startingStackSize is updated every GC by tracking the average size of
|
||||
// stacks scanned during the GC.
|
||||
var startingStackSize uint32 = fixedStack
|
||||
|
||||
@@ -467,43 +467,37 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint
|
||||
//go:linkname syscall_Syscall syscall.Syscall
|
||||
//go:nosplit
|
||||
func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3)
|
||||
}
|
||||
|
||||
//go:linkname syscall_Syscall6 syscall.Syscall6
|
||||
//go:nosplit
|
||||
func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3, a4, a5, a6}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6)
|
||||
}
|
||||
|
||||
//go:linkname syscall_Syscall9 syscall.Syscall9
|
||||
//go:nosplit
|
||||
func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9)
|
||||
}
|
||||
|
||||
//go:linkname syscall_Syscall12 syscall.Syscall12
|
||||
//go:nosplit
|
||||
func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
|
||||
}
|
||||
|
||||
//go:linkname syscall_Syscall15 syscall.Syscall15
|
||||
//go:nosplit
|
||||
func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
|
||||
}
|
||||
|
||||
//go:linkname syscall_Syscall18 syscall.Syscall18
|
||||
//go:nosplit
|
||||
func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
|
||||
args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18}
|
||||
return syscall_SyscallN(fn, args[:nargs]...)
|
||||
return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
|
||||
}
|
||||
|
||||
// maxArgs should be divisible by 2, as Windows stack
|
||||
@@ -516,7 +510,15 @@ const maxArgs = 42
|
||||
//go:linkname syscall_SyscallN syscall.SyscallN
|
||||
//go:nosplit
|
||||
func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
|
||||
if len(args) > maxArgs {
|
||||
return syscall_syscalln(fn, uintptr(len(args)), args...)
|
||||
}
|
||||
|
||||
//go:nosplit
|
||||
func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
|
||||
if n > uintptr(len(args)) {
|
||||
panic("syscall: n > len(args)") // should not be reachable from user code
|
||||
}
|
||||
if n > maxArgs {
|
||||
panic("runtime: SyscallN has too many arguments")
|
||||
}
|
||||
|
||||
@@ -525,7 +527,7 @@ func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
|
||||
// calls back into Go.
|
||||
c := &getg().m.winsyscall
|
||||
c.fn = fn
|
||||
c.n = uintptr(len(args))
|
||||
c.n = n
|
||||
if c.n != 0 {
|
||||
c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
|
||||
}
|
||||
|
||||
@@ -1215,6 +1215,13 @@ func TestBigStackCallbackSyscall(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyscallStackUsage(t *testing.T) {
|
||||
// Test that the stack usage of a syscall doesn't exceed the limit.
|
||||
// See https://go.dev/issue/69813.
|
||||
syscall.Syscall15(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
syscall.Syscall18(procSetEvent.Addr(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
|
||||
}
|
||||
|
||||
// wantLoadLibraryEx reports whether we expect LoadLibraryEx to work for tests.
|
||||
func wantLoadLibraryEx() bool {
|
||||
return testenv.Builder() != "" && (runtime.GOARCH == "amd64" || runtime.GOARCH == "386")
|
||||
|
||||
@@ -30,35 +30,6 @@ type timer struct {
|
||||
state uint8 // state bits
|
||||
isChan bool // timer has a channel; immutable; can be read without lock
|
||||
|
||||
// isSending is used to handle races between running a
|
||||
// channel timer and stopping or resetting the timer.
|
||||
// It is used only for channel timers (t.isChan == true).
|
||||
// The lowest zero bit is set when about to send a value on the channel,
|
||||
// and cleared after sending the value.
|
||||
// The stop/reset code uses this to detect whether it
|
||||
// stopped the channel send.
|
||||
//
|
||||
// An isSending bit is set only when t.mu is held.
|
||||
// An isSending bit is cleared only when t.sendLock is held.
|
||||
// isSending is read only when both t.mu and t.sendLock are held.
|
||||
//
|
||||
// Setting and clearing Uint8 bits handles the case of
|
||||
// a timer that is reset concurrently with unlockAndRun.
|
||||
// If the reset timer runs immediately, we can wind up with
|
||||
// concurrent calls to unlockAndRun for the same timer.
|
||||
// Using matched bit set and clear in unlockAndRun
|
||||
// ensures that the value doesn't get temporarily out of sync.
|
||||
//
|
||||
// We use a uint8 to keep the timer struct small.
|
||||
// This means that we can only support up to 8 concurrent
|
||||
// runs of a timer, where a concurrent run can only occur if
|
||||
// we start a run, unlock the timer, the timer is reset to a new
|
||||
// value (or the ticker fires again), it is ready to run,
|
||||
// and it is actually run, all before the first run completes.
|
||||
// Since completing a run is fast, even 2 concurrent timer runs are
|
||||
// nearly impossible, so this should be safe in practice.
|
||||
isSending atomic.Uint8
|
||||
|
||||
blocked uint32 // number of goroutines blocked on timer's channel
|
||||
|
||||
// Timer wakes up at when, and then at when+period, ... (period > 0 only)
|
||||
@@ -98,6 +69,20 @@ type timer struct {
|
||||
// sendLock protects sends on the timer's channel.
|
||||
// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
|
||||
sendLock mutex
|
||||
|
||||
// isSending is used to handle races between running a
|
||||
// channel timer and stopping or resetting the timer.
|
||||
// It is used only for channel timers (t.isChan == true).
|
||||
// It is not used for tickers.
|
||||
// The value is incremented when about to send a value on the channel,
|
||||
// and decremented after sending the value.
|
||||
// The stop/reset code uses this to detect whether it
|
||||
// stopped the channel send.
|
||||
//
|
||||
// isSending is incremented only when t.mu is held.
|
||||
// isSending is decremented only when t.sendLock is held.
|
||||
// isSending is read only when both t.mu and t.sendLock are held.
|
||||
isSending atomic.Int32
|
||||
}
|
||||
|
||||
// init initializes a newly allocated timer t.
|
||||
@@ -467,7 +452,7 @@ func (t *timer) stop() bool {
|
||||
// send from actually happening. That means
|
||||
// that we should return true: the timer was
|
||||
// stopped, even though t.when may be zero.
|
||||
if t.isSending.Load() > 0 {
|
||||
if t.period == 0 && t.isSending.Load() > 0 {
|
||||
pending = true
|
||||
}
|
||||
}
|
||||
@@ -529,6 +514,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
|
||||
t.maybeRunAsync()
|
||||
}
|
||||
t.trace("modify")
|
||||
oldPeriod := t.period
|
||||
t.period = period
|
||||
if f != nil {
|
||||
t.f = f
|
||||
@@ -570,7 +556,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
|
||||
// send from actually happening. That means
|
||||
// that we should return true: the timer was
|
||||
// stopped, even though t.when may be zero.
|
||||
if t.isSending.Load() > 0 {
|
||||
if oldPeriod == 0 && t.isSending.Load() > 0 {
|
||||
pending = true
|
||||
}
|
||||
}
|
||||
@@ -1063,20 +1049,11 @@ func (t *timer) unlockAndRun(now int64) {
|
||||
}
|
||||
|
||||
async := debug.asynctimerchan.Load() != 0
|
||||
var isSendingClear uint8
|
||||
if !async && t.isChan {
|
||||
if !async && t.isChan && t.period == 0 {
|
||||
// Tell Stop/Reset that we are sending a value.
|
||||
// Set the lowest zero bit.
|
||||
// We do this awkward step because atomic.Uint8
|
||||
// doesn't support Add or CompareAndSwap.
|
||||
// We only set bits with t locked.
|
||||
v := t.isSending.Load()
|
||||
i := sys.TrailingZeros8(^v)
|
||||
if i == 8 {
|
||||
if t.isSending.Add(1) < 0 {
|
||||
throw("too many concurrent timer firings")
|
||||
}
|
||||
isSendingClear = 1 << i
|
||||
t.isSending.Or(isSendingClear)
|
||||
}
|
||||
|
||||
t.unlock()
|
||||
@@ -1114,6 +1091,16 @@ func (t *timer) unlockAndRun(now int64) {
|
||||
// started to send the value. That lets them correctly return
|
||||
// true meaning that no value was sent.
|
||||
lock(&t.sendLock)
|
||||
|
||||
if t.period == 0 {
|
||||
// We are committed to possibly sending a value
|
||||
// based on seq, so no need to keep telling
|
||||
// stop/modify that we are sending.
|
||||
if t.isSending.Add(-1) < 0 {
|
||||
throw("mismatched isSending updates")
|
||||
}
|
||||
}
|
||||
|
||||
if t.seq != seq {
|
||||
f = func(any, uintptr, int64) {}
|
||||
}
|
||||
@@ -1122,9 +1109,6 @@ func (t *timer) unlockAndRun(now int64) {
|
||||
f(arg, seq, delay)
|
||||
|
||||
if !async && t.isChan {
|
||||
// We are no longer sending a value.
|
||||
t.isSending.And(^isSendingClear)
|
||||
|
||||
unlock(&t.sendLock)
|
||||
}
|
||||
|
||||
|
||||
@@ -99,24 +99,26 @@ const (
|
||||
traceBlockDebugCall
|
||||
traceBlockUntilGCEnds
|
||||
traceBlockSleep
|
||||
traceBlockGCWeakToStrongWait
|
||||
)
|
||||
|
||||
var traceBlockReasonStrings = [...]string{
|
||||
traceBlockGeneric: "unspecified",
|
||||
traceBlockForever: "forever",
|
||||
traceBlockNet: "network",
|
||||
traceBlockSelect: "select",
|
||||
traceBlockCondWait: "sync.(*Cond).Wait",
|
||||
traceBlockSync: "sync",
|
||||
traceBlockChanSend: "chan send",
|
||||
traceBlockChanRecv: "chan receive",
|
||||
traceBlockGCMarkAssist: "GC mark assist wait for work",
|
||||
traceBlockGCSweep: "GC background sweeper wait",
|
||||
traceBlockSystemGoroutine: "system goroutine wait",
|
||||
traceBlockPreempted: "preempted",
|
||||
traceBlockDebugCall: "wait for debug call",
|
||||
traceBlockUntilGCEnds: "wait until GC ends",
|
||||
traceBlockSleep: "sleep",
|
||||
traceBlockGeneric: "unspecified",
|
||||
traceBlockForever: "forever",
|
||||
traceBlockNet: "network",
|
||||
traceBlockSelect: "select",
|
||||
traceBlockCondWait: "sync.(*Cond).Wait",
|
||||
traceBlockSync: "sync",
|
||||
traceBlockChanSend: "chan send",
|
||||
traceBlockChanRecv: "chan receive",
|
||||
traceBlockGCMarkAssist: "GC mark assist wait for work",
|
||||
traceBlockGCSweep: "GC background sweeper wait",
|
||||
traceBlockSystemGoroutine: "system goroutine wait",
|
||||
traceBlockPreempted: "preempted",
|
||||
traceBlockDebugCall: "wait for debug call",
|
||||
traceBlockUntilGCEnds: "wait until GC ends",
|
||||
traceBlockSleep: "sleep",
|
||||
traceBlockGCWeakToStrongWait: "GC weak to strong wait",
|
||||
}
|
||||
|
||||
// traceGoStopReason is an enumeration of reasons a goroutine might yield.
|
||||
|
||||
@@ -42,6 +42,7 @@ func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a
|
||||
// Deprecated: Use [SyscallN] instead.
|
||||
func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno)
|
||||
|
||||
//go:noescape
|
||||
func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
|
||||
func loadlibrary(filename *uint16) (handle uintptr, err Errno)
|
||||
func loadsystemlibrary(filename *uint16, absoluteFilepath *uint16) (handle uintptr, err Errno)
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
package syscall
|
||||
|
||||
import (
|
||||
errpkg "errors"
|
||||
"internal/itoa"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
@@ -328,6 +329,7 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att
|
||||
if clone3 != nil {
|
||||
pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3), 0)
|
||||
} else {
|
||||
// N.B. Keep in sync with doCheckClonePidfd.
|
||||
flags |= uintptr(SIGCHLD)
|
||||
if runtime.GOARCH == "s390x" {
|
||||
// On Linux/s390, the first two arguments of clone(2) are swapped.
|
||||
@@ -743,3 +745,82 @@ func forkAndExecFailureCleanup(attr *ProcAttr, sys *SysProcAttr) {
|
||||
*sys.PidFD = -1
|
||||
}
|
||||
}
|
||||
|
||||
// checkClonePidfd verifies that clone(CLONE_PIDFD) works by actually doing a
|
||||
// clone.
|
||||
//
|
||||
//go:linkname os_checkClonePidfd os.checkClonePidfd
|
||||
func os_checkClonePidfd() error {
|
||||
pidfd := int32(-1)
|
||||
pid, errno := doCheckClonePidfd(&pidfd)
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if pidfd == -1 {
|
||||
// Bad: CLONE_PIDFD failed to provide a pidfd. Reap the process
|
||||
// before returning.
|
||||
|
||||
var err error
|
||||
for {
|
||||
var status WaitStatus
|
||||
_, err = Wait4(int(pid), &status, 0, nil)
|
||||
if err != EINTR {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return errpkg.New("clone(CLONE_PIDFD) failed to return pidfd")
|
||||
}
|
||||
|
||||
// Good: CLONE_PIDFD provided a pidfd. Reap the process and close the
|
||||
// pidfd.
|
||||
defer Close(int(pidfd))
|
||||
|
||||
for {
|
||||
const _P_PIDFD = 3
|
||||
_, _, errno = Syscall6(SYS_WAITID, _P_PIDFD, uintptr(pidfd), 0, WEXITED, 0, 0)
|
||||
if errno != EINTR {
|
||||
break
|
||||
}
|
||||
}
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// doCheckClonePidfd implements the actual clone call of os_checkClonePidfd and
|
||||
// child execution. This is a separate function so we can separate the child's
|
||||
// and parent's stack frames if we're using vfork.
|
||||
//
|
||||
// This is go:noinline because the point is to keep the stack frames of this
|
||||
// and os_checkClonePidfd separate.
|
||||
//
|
||||
//go:noinline
|
||||
func doCheckClonePidfd(pidfd *int32) (pid uintptr, errno Errno) {
|
||||
flags := uintptr(CLONE_VFORK|CLONE_VM|CLONE_PIDFD|SIGCHLD)
|
||||
if runtime.GOARCH == "s390x" {
|
||||
// On Linux/s390, the first two arguments of clone(2) are swapped.
|
||||
pid, errno = rawVforkSyscall(SYS_CLONE, 0, flags, uintptr(unsafe.Pointer(pidfd)))
|
||||
} else {
|
||||
pid, errno = rawVforkSyscall(SYS_CLONE, flags, 0, uintptr(unsafe.Pointer(pidfd)))
|
||||
}
|
||||
if errno != 0 || pid != 0 {
|
||||
// If we're in the parent, we must return immediately
|
||||
// so we're not in the same stack frame as the child.
|
||||
// This can at most use the return PC, which the child
|
||||
// will not modify, and the results of
|
||||
// rawVforkSyscall, which must have been written after
|
||||
// the child was replaced.
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
RawSyscall(SYS_EXIT_GROUP, 0, 0, 0)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,6 +213,51 @@ func TestGetStartupInfo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyscallAllocations(t *testing.T) {
|
||||
testenv.SkipIfOptimizationOff(t)
|
||||
|
||||
// Test that syscall.SyscallN arguments do not escape.
|
||||
// The function used (in this case GetVersion) doesn't matter
|
||||
// as long as it is always available and doesn't panic.
|
||||
h, err := syscall.LoadLibrary("kernel32.dll")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer syscall.FreeLibrary(h)
|
||||
proc, err := syscall.GetProcAddress(h, "GetVersion")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testAllocs := func(t *testing.T, name string, fn func() error) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
n := int(testing.AllocsPerRun(10, func() {
|
||||
if err := fn(); err != nil {
|
||||
t.Fatalf("%s: %v", name, err)
|
||||
}
|
||||
}))
|
||||
if n > 0 {
|
||||
t.Errorf("allocs = %d, want 0", n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
testAllocs(t, "SyscallN", func() error {
|
||||
r0, _, e1 := syscall.SyscallN(proc, 0, 0, 0)
|
||||
if r0 == 0 {
|
||||
return syscall.Errno(e1)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
testAllocs(t, "Syscall", func() error {
|
||||
r0, _, e1 := syscall.Syscall(proc, 3, 0, 0, 0)
|
||||
if r0 == 0 {
|
||||
return syscall.Errno(e1)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzUTF16FromString(f *testing.F) {
|
||||
f.Add("hi") // ASCII
|
||||
f.Add("â") // latin1
|
||||
|
||||
@@ -847,6 +847,57 @@ func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Test having a large number of goroutines wake up a ticker simultaneously.
|
||||
// This used to trigger a crash when run under x/tools/cmd/stress.
|
||||
func TestMultiWakeupTicker(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("-short")
|
||||
}
|
||||
|
||||
goroutines := runtime.GOMAXPROCS(0)
|
||||
timer := NewTicker(Microsecond)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(goroutines)
|
||||
for range goroutines {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 100000 {
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-After(Millisecond):
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Test having a large number of goroutines wake up a timer simultaneously.
|
||||
// This used to trigger a crash when run under x/tools/cmd/stress.
|
||||
func TestMultiWakeupTimer(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("-short")
|
||||
}
|
||||
|
||||
goroutines := runtime.GOMAXPROCS(0)
|
||||
timer := NewTimer(Nanosecond)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(goroutines)
|
||||
for range goroutines {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for range 10000 {
|
||||
select {
|
||||
case <-timer.C:
|
||||
default:
|
||||
}
|
||||
timer.Reset(Nanosecond)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Benchmark timer latency when the thread that creates the timer is busy with
|
||||
// other work and the timers must be serviced by other threads.
|
||||
// https://golang.org/issue/38860
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -1084,10 +1085,15 @@ func TestLoadFixed(t *testing.T) {
|
||||
// So GMT+1 corresponds to -3600 in the Go zone, not +3600.
|
||||
name, offset := Now().In(loc).Zone()
|
||||
// The zone abbreviation is "-01" since tzdata-2016g, and "GMT+1"
|
||||
// on earlier versions; we accept both. (Issue #17276).
|
||||
if !(name == "GMT+1" || name == "-01") || offset != -1*60*60 {
|
||||
t.Errorf("Now().In(loc).Zone() = %q, %d, want %q or %q, %d",
|
||||
name, offset, "GMT+1", "-01", -1*60*60)
|
||||
// on earlier versions; we accept both. (Issue 17276.)
|
||||
wantName := []string{"GMT+1", "-01"}
|
||||
// The zone abbreviation may be "+01" on OpenBSD. (Issue 69840.)
|
||||
if runtime.GOOS == "openbsd" {
|
||||
wantName = append(wantName, "+01")
|
||||
}
|
||||
if !slices.Contains(wantName, name) || offset != -1*60*60 {
|
||||
t.Errorf("Now().In(loc).Zone() = %q, %d, want %q (one of), %d",
|
||||
name, offset, wantName, -1*60*60)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,22 +12,29 @@ import (
|
||||
"bytes"
|
||||
"log"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
checkLinkOutput("", "-B argument must start with 0x")
|
||||
// The cannot open file error indicates that the parsing of -B flag
|
||||
// succeeded and it failed at a later step.
|
||||
checkLinkOutput("0", "-B argument must start with 0x")
|
||||
checkLinkOutput("0x", "usage")
|
||||
checkLinkOutput("0x", "cannot open file nonexistent.o")
|
||||
checkLinkOutput("0x0", "-B argument must have even number of digits")
|
||||
checkLinkOutput("0x00", "usage")
|
||||
checkLinkOutput("0x00", "cannot open file nonexistent.o")
|
||||
checkLinkOutput("0xYZ", "-B argument contains invalid hex digit")
|
||||
checkLinkOutput("0x"+strings.Repeat("00", 32), "usage")
|
||||
checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)")
|
||||
|
||||
maxLen := 32
|
||||
if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
|
||||
maxLen = 16
|
||||
}
|
||||
checkLinkOutput("0x"+strings.Repeat("00", maxLen), "cannot open file nonexistent.o")
|
||||
checkLinkOutput("0x"+strings.Repeat("00", maxLen+1), "-B option too long")
|
||||
}
|
||||
|
||||
func checkLinkOutput(buildid string, message string) {
|
||||
cmd := exec.Command("go", "tool", "link", "-B", buildid)
|
||||
cmd := exec.Command("go", "tool", "link", "-B", buildid, "nonexistent.o")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
log.Fatalf("expected cmd/link to fail")
|
||||
@@ -39,6 +46,6 @@ func checkLinkOutput(buildid string, message string) {
|
||||
}
|
||||
|
||||
if !strings.Contains(firstLine, message) {
|
||||
log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine)
|
||||
log.Fatalf("%s: cmd/link output did not include expected message %q: %s", buildid, message, firstLine)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user