Update to go1.24.0

This commit is contained in:
Vorapol Rinsatitnon
2025-02-14 12:42:07 +07:00
parent 25e497e367
commit bf266cebe6
3169 changed files with 236789 additions and 60275 deletions

View File

@@ -1,19 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Maximum number of key/elem pairs a bucket can hold.
MapBucketCountBits = 3 // log2 of number of elements in a bucket.
MapBucketCount = 1 << MapBucketCountBits
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
MapMaxKeyBytes = 128
MapMaxElemBytes = 128 // Must fit in a uint8.
)

View File

@@ -0,0 +1,54 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
import (
"unsafe"
)
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Maximum number of key/elem pairs a bucket can hold.
OldMapBucketCountBits = 3 // log2 of number of elements in a bucket.
OldMapBucketCount = 1 << OldMapBucketCountBits
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Note: fast map functions cannot handle big elems (bigger than OldMapMaxElemBytes).
OldMapMaxKeyBytes = 128
OldMapMaxElemBytes = 128 // Must fit in a uint8.
)
// OldMapType is the runtime type descriptor layout for the original
// bucket-based map implementation (the "old" maps, as opposed to
// SwissMapType). Field order and sizes are ABI: they must match the
// descriptors emitted by the compiler.
type OldMapType struct {
Type
Key *Type
Elem *Type
Bucket *Type // internal type representing a hash bucket
// function for hashing keys (ptr to key, seed) -> hash
Hasher func(unsafe.Pointer, uintptr) uintptr
KeySize uint8 // size of key slot
ValueSize uint8 // size of elem slot
BucketSize uint16 // size of bucket
Flags uint32
}
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.

// IndirectKey reports whether the bucket stores a pointer to the key
// rather than the key itself (flag bit 1<<0).
func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&1 != 0
}

// IndirectElem reports whether the bucket stores a pointer to the elem
// rather than the elem itself (flag bit 1<<1).
func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&2 != 0
}

// ReflexiveKey reports whether k==k holds for all keys of this map's
// key type (flag bit 1<<2); false for e.g. float keys, where NaN != NaN.
func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys
return mt.Flags&4 != 0
}

// NeedKeyUpdate reports whether the stored key must be overwritten when
// an existing entry is updated (flag bit 1<<3).
func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&8 != 0
}

// HashMightPanic reports whether hashing a key can panic, e.g. for
// non-comparable dynamic types (flag bit 1<<4).
func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&16 != 0
}

View File

@@ -0,0 +1,10 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
package abi
// See comment in map_select_swiss.go.
// mapType is the map descriptor this binary was built with: the
// !goexperiment.swissmap build tag above selects the old bucket-based
// implementation, so it aliases OldMapType.
type mapType = OldMapType

View File

@@ -0,0 +1,22 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package abi
// Select the map type that this binary is built using. This is for common
// lookup methods like Type.Key to know which type to use.
//
// Note that mapType *must not be used by any functions called in the
// compiler to build a target program* because the compiler must use the map
// type determined by run-time GOEXPERIMENT, not the build tags used to build
// the compiler.
//
// TODO(prattmic): This package is rather confusing because it has many
// functions that can't be used by the compiler (e.g., Type.Uncommon depends on
// the layout of type + uncommon objects in the binary. It would be incorrect
// for an ad-hoc local Type object). It may be best to move code that isn't
// usable by the compiler out of the package.
// mapType aliases SwissMapType: this file is compiled only when the
// goexperiment.swissmap build tag (above) is enabled.
type mapType = SwissMapType

View File

@@ -0,0 +1,64 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package abi
import (
"unsafe"
)
// Map constants common to several packages
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Number of bits in the group.slot count.
SwissMapGroupSlotsBits = 3
// Number of slots in a group.
SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
SwissMapMaxKeyBytes = 128
SwissMapMaxElemBytes = 128
ctrlEmpty = 0b10000000 // control byte marking an empty slot
bitsetLSB = 0x0101010101010101 // low bit set in each byte of a 64-bit word
// Value of control word with all empty slots.
// (Multiplying by bitsetLSB broadcasts ctrlEmpty into every byte.)
SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
)
// SwissMapType is the runtime type descriptor layout for the Swiss-table
// map implementation. Field order and sizes are ABI: they must match the
// descriptors emitted by the compiler.
type SwissMapType struct {
Type
Key *Type
Elem *Type
Group *Type // internal type representing a slot group
// function for hashing keys (ptr to key, seed) -> hash
Hasher func(unsafe.Pointer, uintptr) uintptr
GroupSize uintptr // == Group.Size_
SlotSize uintptr // size of key/elem slot
ElemOff uintptr // offset of elem in key/elem slot
Flags uint32
}
// Flag values for SwissMapType.Flags. Note that the bit assignments
// differ from OldMapType's (see the accessor methods below).
const (
SwissMapNeedKeyUpdate = 1 << iota // key must be overwritten on update
SwissMapHashMightPanic // hashing a key may panic
SwissMapIndirectKey // slot holds a pointer to the key
SwissMapIndirectElem // slot holds a pointer to the elem
)
// NeedKeyUpdate reports whether the stored key must be overwritten when
// an existing entry is updated.
func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&SwissMapNeedKeyUpdate != 0
}

// HashMightPanic reports whether hashing a key can panic.
func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&SwissMapHashMightPanic != 0
}

// IndirectKey reports whether the slot stores a pointer to the key
// rather than the key itself.
func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&SwissMapIndirectKey != 0
}

// IndirectElem reports whether the slot stores a pointer to the elem
// rather than the elem itself.
func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&SwissMapIndirectElem != 0
}

View File

@@ -29,8 +29,16 @@ type Type struct {
// (ptr to object A, ptr to object B) -> ==?
Equal func(unsafe.Pointer, unsafe.Pointer) bool
// GCData stores the GC type data for the garbage collector.
// If the KindGCProg bit is set in kind, GCData is a GC program.
// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
// Normally, GCData points to a bitmask that describes the
// ptr/nonptr fields of the type. The bitmask will have at
// least PtrBytes/ptrSize bits.
// If the TFlagGCMaskOnDemand bit is set, GCData is instead a
// **byte and the pointer to the bitmask is one dereference away.
// The runtime will build the bitmask if needed.
// (See runtime/type.go:getGCMask.)
// Note: multiple types may have the same value of GCData,
// including when TFlagGCMaskOnDemand is set. The types will, of course,
// have the same pointer layout (but not necessarily the same size).
GCData *byte
Str NameOff // string form
PtrToThis TypeOff // type for pointer to this type, may be zero
@@ -73,7 +81,6 @@ const (
const (
// TODO (khr, drchase) why aren't these in TFlag? Investigate, fix if possible.
KindDirectIface Kind = 1 << 5
KindGCProg Kind = 1 << 6 // Type.gc points to GC program
KindMask Kind = (1 << 5) - 1
)
@@ -112,11 +119,12 @@ const (
// this type as a single region of t.size bytes.
TFlagRegularMemory TFlag = 1 << 3
// TFlagUnrolledBitmap marks special types that are unrolled-bitmap
// versions of types with GC programs.
// These types need to be deallocated when the underlying object
// is freed.
TFlagUnrolledBitmap TFlag = 1 << 4
// TFlagGCMaskOnDemand means that the GC pointer bitmask will be
// computed on demand at runtime instead of being precomputed at
// compile time. If this flag is set, the GCData field effectively
// has type **byte instead of *byte. The runtime will store a
// pointer to the GC pointer bitmask in *GCData.
TFlagGCMaskOnDemand TFlag = 1 << 4
)
// NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime.
@@ -206,6 +214,9 @@ func (t *Type) IsDirectIface() bool {
}
// GcSlice returns the bytes of the precomputed GC pointer bitmask in
// the half-open range [begin, end).
// It panics for types whose mask is built on demand (TFlagGCMaskOnDemand),
// because for those GCData is effectively a **byte, not the mask itself.
func (t *Type) GcSlice(begin, end uintptr) []byte {
if t.TFlag&TFlagGCMaskOnDemand != 0 {
panic("GcSlice can't handle on-demand gcdata types")
}
return unsafe.Slice(t.GCData, int(end))[begin:]
}
@@ -350,7 +361,7 @@ func (t *Type) Uncommon() *UncommonType {
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
MapType
mapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u
@@ -379,7 +390,7 @@ func (t *Type) Elem() *Type {
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
tt := (*MapType)(unsafe.Pointer(t))
tt := (*mapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))
@@ -399,12 +410,12 @@ func (t *Type) StructType() *StructType {
return (*StructType)(unsafe.Pointer(t))
}
// MapType returns t cast to a *MapType, or nil if its tag does not match.
func (t *Type) MapType() *MapType {
// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match.
func (t *Type) MapType() *mapType {
if t.Kind() != Map {
return nil
}
return (*MapType)(unsafe.Pointer(t))
return (*mapType)(unsafe.Pointer(t))
}
// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@@ -464,40 +475,9 @@ func (t *Type) NumMethod() int {
// NumMethod returns the number of interface methods in the type's method set.
func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
type MapType struct {
Type
Key *Type
Elem *Type
Bucket *Type // internal type representing a hash bucket
// function for hashing keys (ptr to key, seed) -> hash
Hasher func(unsafe.Pointer, uintptr) uintptr
KeySize uint8 // size of key slot
ValueSize uint8 // size of elem slot
BucketSize uint16 // size of bucket
Flags uint32
}
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
return mt.Flags&1 != 0
}
func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
return mt.Flags&2 != 0
}
func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
return mt.Flags&4 != 0
}
func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
return mt.Flags&8 != 0
}
func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
return mt.Flags&16 != 0
}
func (t *Type) Key() *Type {
if t.Kind() == Map {
return (*MapType)(unsafe.Pointer(t)).Key
return (*mapType)(unsafe.Pointer(t)).Key
}
return nil
}

View File

@@ -23,7 +23,7 @@ var (
GOROOT = os.Getenv("GOROOT") // cached for efficiency
GOARCH = envOr("GOARCH", defaultGOARCH)
GOOS = envOr("GOOS", defaultGOOS)
GO386 = envOr("GO386", defaultGO386)
GO386 = envOr("GO386", DefaultGO386)
GOAMD64 = goamd64()
GOARM = goarm()
GOARM64 = goarm64()
@@ -34,6 +34,7 @@ var (
GOWASM = gowasm()
ToolTags = toolTags()
GO_LDSO = defaultGO_LDSO
GOFIPS140 = gofips140()
Version = version
)
@@ -56,7 +57,7 @@ func envOr(key, value string) string {
}
func goamd64() int {
switch v := envOr("GOAMD64", defaultGOAMD64); v {
switch v := envOr("GOAMD64", DefaultGOAMD64); v {
case "v1":
return 1
case "v2":
@@ -67,15 +68,56 @@ func goamd64() int {
return 4
}
Error = fmt.Errorf("invalid GOAMD64: must be v1, v2, v3, v4")
return int(defaultGOAMD64[len("v")] - '0')
return int(DefaultGOAMD64[len("v")] - '0')
}
type goarmFeatures struct {
// gofips140 validates the GOFIPS140 environment setting (falling back to
// DefaultGOFIPS140) and returns it. Valid values are the fixed keywords
// off/latest/inprocess/certified or a frozen module version vX.Y.Z.
// On an invalid value it records the problem in Error and returns the
// default.
func gofips140() string {
	v := envOr("GOFIPS140", DefaultGOFIPS140)
	switch v {
	case "off", "latest", "inprocess", "certified":
		return v
	default:
		if isFIPSVersion(v) {
			return v
		}
		Error = fmt.Errorf("invalid GOFIPS140: must be off, latest, inprocess, certified, or vX.Y.Z")
		return DefaultGOFIPS140
	}
}
// isFIPSVersion reports whether v is a well-formed FIPS snapshot
// version of the form vX.Y.Z, where X, Y, and Z are each a nonempty
// run of ASCII digits with no extra prefix or suffix.
func isFIPSVersion(v string) bool {
	rest, found := strings.CutPrefix(v, "v")
	if !found {
		return false
	}
	// Expect exactly three dot-separated numeric components.
	for part := 0; part < 3; part++ {
		var ok bool
		rest, ok = skipNum(rest)
		if !ok {
			return false
		}
		if part < 2 {
			if rest, found = strings.CutPrefix(rest, "."); !found {
				return false
			}
		}
	}
	// Nothing may follow the third component.
	return rest == ""
}
// skipNum skips the leading text matching [0-9]+
// in s, returning the rest and whether such text was found.
func skipNum(s string) (rest string, ok bool) {
i := 0
for i < len(s) && '0' <= s[i] && s[i] <= '9' {
i++
}
return s[i:], i > 0
}
type GoarmFeatures struct {
Version int
SoftFloat bool
}
func (g goarmFeatures) String() string {
func (g GoarmFeatures) String() string {
armStr := strconv.Itoa(g.Version)
if g.SoftFloat {
armStr += ",softfloat"
@@ -85,12 +127,12 @@ func (g goarmFeatures) String() string {
return armStr
}
func goarm() (g goarmFeatures) {
func goarm() (g GoarmFeatures) {
const (
softFloatOpt = ",softfloat"
hardFloatOpt = ",hardfloat"
)
def := defaultGOARM
def := DefaultGOARM
if GOOS == "android" && GOARCH == "arm" {
// Android arm devices always support GOARM=7.
def = "7"
@@ -186,14 +228,14 @@ func ParseGoarm64(v string) (g Goarm64Features, e error) {
default:
e = fmt.Errorf("invalid GOARM64: must start with v8.{0-9} or v9.{0-5} and may optionally end in %q and/or %q",
lseOpt, cryptoOpt)
g.Version = defaultGOARM64
g.Version = DefaultGOARM64
}
return
}
func goarm64() (g Goarm64Features) {
g, Error = ParseGoarm64(envOr("GOARM64", defaultGOARM64))
g, Error = ParseGoarm64(envOr("GOARM64", DefaultGOARM64))
return
}
@@ -229,25 +271,25 @@ func (g Goarm64Features) Supports(s string) bool {
}
func gomips() string {
switch v := envOr("GOMIPS", defaultGOMIPS); v {
switch v := envOr("GOMIPS", DefaultGOMIPS); v {
case "hardfloat", "softfloat":
return v
}
Error = fmt.Errorf("invalid GOMIPS: must be hardfloat, softfloat")
return defaultGOMIPS
return DefaultGOMIPS
}
func gomips64() string {
switch v := envOr("GOMIPS64", defaultGOMIPS64); v {
switch v := envOr("GOMIPS64", DefaultGOMIPS64); v {
case "hardfloat", "softfloat":
return v
}
Error = fmt.Errorf("invalid GOMIPS64: must be hardfloat, softfloat")
return defaultGOMIPS64
return DefaultGOMIPS64
}
func goppc64() int {
switch v := envOr("GOPPC64", defaultGOPPC64); v {
switch v := envOr("GOPPC64", DefaultGOPPC64); v {
case "power8":
return 8
case "power9":
@@ -256,18 +298,18 @@ func goppc64() int {
return 10
}
Error = fmt.Errorf("invalid GOPPC64: must be power8, power9, power10")
return int(defaultGOPPC64[len("power")] - '0')
return int(DefaultGOPPC64[len("power")] - '0')
}
func goriscv64() int {
switch v := envOr("GORISCV64", defaultGORISCV64); v {
switch v := envOr("GORISCV64", DefaultGORISCV64); v {
case "rva20u64":
return 20
case "rva22u64":
return 22
}
Error = fmt.Errorf("invalid GORISCV64: must be rva20u64, rva22u64")
v := defaultGORISCV64[len("rva"):]
v := DefaultGORISCV64[len("rva"):]
i := strings.IndexFunc(v, func(r rune) bool {
return r < '0' || r > '9'
})

View File

@@ -123,3 +123,39 @@ func TestGogoarchTags(t *testing.T) {
GOARCH = old_goarch
GOARM64 = old_goarm64
}
// goodFIPS lists well-formed vX.Y.Z strings that isFIPSVersion must accept.
var goodFIPS = []string{
"v1.0.0",
"v1.0.1",
"v1.2.0",
"v1.2.3",
}

// badFIPS lists malformed version strings (wrong prefix or trailing
// suffix) that isFIPSVersion must reject.
var badFIPS = []string{
"v1.0.0-fips",
"v1.0.0+fips",
"1.0.0",
"x1.0.0",
}
// TestIsFIPSVersion checks isFIPSVersion against known-good strings,
// known-bad strings, and every proper prefix of a valid version.
func TestIsFIPSVersion(t *testing.T) {
	// Accepted forms.
	for _, s := range goodFIPS {
		if !isFIPSVersion(s) {
			t.Errorf("isFIPSVersion(%q) = false, want true", s)
		}
	}
	// Every truncation of a valid version is invalid.
	const v = "v1.2.3"
	for i := range v {
		if prefix := v[:i]; isFIPSVersion(prefix) {
			t.Errorf("isFIPSVersion(%q) = true, want false", prefix)
		}
	}
	// Rejected forms.
	for _, s := range badFIPS {
		if isFIPSVersion(s) {
			t.Errorf("isFIPSVersion(%q) = true, want false", s)
		}
	}
}

View File

@@ -67,10 +67,20 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
regabiSupported = true
}
var haveXchg8 bool
switch goarch {
case "386", "amd64", "arm", "arm64", "ppc64le", "ppc64":
haveXchg8 = true
}
baseline := goexperiment.Flags{
RegabiWrappers: regabiSupported,
RegabiArgs: regabiSupported,
CoverageRedesign: true,
AliasTypeParams: true,
SwissMap: true,
SpinbitMutex: haveXchg8,
SyncHashTrieMap: true,
}
// Start with the statically enabled set of experiments.

View File

@@ -28,58 +28,136 @@ TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT,$0-40
// R7 length of b
// R4 points to the start of a
// R6 points to the start of b
// R13 points to the return value (-1/0/1)
// for regabi the return value (-1/0/1) in R4
TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0
BEQ R4, R6, samebytes // same start of a and b
BEQ R4, R6, cmp_len // same start of a and b, then compare lengths
SGTU R5, R7, R9
BNE R0, R9, r2_lt_r1
BNE R9, b_lt_a
MOVV R5, R14
JMP entry
r2_lt_r1:
MOVV R7, R14 // R14 is min(R4, R5)
b_lt_a:
MOVV R7, R14 // R14 is min(R5, R7)
entry:
ADDV R4, R14, R12 // R6 start of a, R14 end of a
BEQ R4, R12, samebytes // length is 0
ADDV R4, R14, R12 // R4 start of a, R12 end of a
BEQ R4, R12, cmp_len // minlength is 0
SRLV $4, R14 // R14 is number of chunks
BEQ R0, R14, byte_loop
tail:
MOVV $2, R15
BLT R14, R15, cmp1 // min < 2
SLLV $1, R15
BLT R14, R15, cmp2 // min < 4
SLLV $1, R15
BLT R14, R15, cmp4 // min < 8
SLLV $1, R15
BLT R14, R15, cmp8 // min < 16
SLLV $1, R15
BLT R14, R15, cmp16 // min < 32
// make sure both a and b are aligned.
OR R4, R6, R15
AND $7, R15
BNE R0, R15, byte_loop
PCALIGN $16
chunk16_loop:
BEQ R0, R14, byte_loop
// When min >= 32 bytes, enter the cmp32_loop loop processing:
// take out 4 8-bytes from a and b in turn for comparison.
cmp32_loop:
MOVV (R4), R8
MOVV (R6), R9
BNE R8, R9, byte_loop
MOVV 8(R4), R16
MOVV 8(R6), R17
MOVV 8(R4), R10
MOVV 8(R6), R11
BNE R8, R9, cmp8a
BNE R10, R11, cmp8b
MOVV 16(R4), R8
MOVV 16(R6), R9
MOVV 24(R4), R10
MOVV 24(R6), R11
BNE R8, R9, cmp8a
BNE R10, R11, cmp8b
ADDV $32, R4
ADDV $32, R6
SUBV $32, R14
BGE R14, R15, cmp32_loop
BEQ R14, cmp_len
check16:
MOVV $16, R15
BLT R14, R15, check8
cmp16:
MOVV (R4), R8
MOVV (R6), R9
MOVV 8(R4), R10
MOVV 8(R6), R11
BNE R8, R9, cmp8a
BNE R10, R11, cmp8b
ADDV $16, R4
ADDV $16, R6
SUBVU $1, R14
BEQ R16, R17, chunk16_loop
SUBV $8, R4
SUBV $8, R6
SUBV $16, R14
BEQ R14, cmp_len
byte_loop:
BEQ R4, R12, samebytes
check8:
MOVV $8, R15
BLT R14, R15, check4
cmp8:
MOVV (R4), R8
MOVV (R6), R9
BNE R8, R9, cmp8a
ADDV $8, R4
ADDV $8, R6
SUBV $8, R14
BEQ R14, cmp_len
check4:
MOVV $4, R15
BLT R14, R15, check2
cmp4:
MOVW (R4), R8
MOVW (R6), R9
BNE R8, R9, cmp8a
ADDV $4, R4
ADDV $4, R6
SUBV $4, R14
BEQ R14, cmp_len
check2:
MOVV $2, R15
BLT R14, R15, cmp1
cmp2:
MOVH (R4), R8
MOVH (R6), R9
BNE R8, R9, cmp8a
ADDV $2, R4
ADDV $2, R6
SUBV $2, R14
BEQ R14, cmp_len
cmp1:
BEQ R14, cmp_len
MOVBU (R4), R8
ADDVU $1, R4
MOVBU (R6), R9
ADDVU $1, R6
BEQ R8, R9, byte_loop
BNE R8, R9, byte_cmp
JMP cmp_len
// Compare 8/4/2 bytes taken from R8/R9 that are known to differ.
cmp8a:
MOVV R8, R10
MOVV R9, R11
// Compare 8/4/2 bytes taken from R10/R11 that are known to differ.
cmp8b:
MOVV $0xff, R15
// Take single bytes from R10/R11 in turn for cyclic comparison.
cmp8_loop:
AND R10, R15, R8
AND R11, R15, R9
BNE R8, R9, byte_cmp
SLLV $8, R15
JMP cmp8_loop
// Compare 1 bytes taken from R8/R9 that are known to differ.
byte_cmp:
SGTU R8, R9, R4 // R12 = 1 if (R8 > R9)
SGTU R8, R9, R4 // R4 = 1 if (R8 > R9)
BNE R0, R4, ret
MOVV $-1, R4
JMP ret
samebytes:
cmp_len:
SGTU R5, R7, R8
SGTU R7, R5, R9
SUBV R9, R8, R4

View File

@@ -61,7 +61,7 @@ TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
CMP R3,R6,CR7
ISEL CR0LT,R4,R7,R9
SETB_CR0(R3)
BC $12,30,LR // beqlr cr7
BEQ CR7,LR
BR cmpbody<>(SB)
TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
@@ -83,7 +83,7 @@ TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
MOVD R5,R6
MOVD R3,R5
SETB_CR0(R3)
BC $12,30,LR // beqlr cr7
BEQ CR7,LR
BR cmpbody<>(SB)
#ifdef GOARCH_ppc64le
@@ -143,7 +143,7 @@ cmp64_loop:
ADD $64,R5,R5 // increment to next 64 bytes of A
ADD $64,R6,R6 // increment to next 64 bytes of B
BDNZ cmp64_loop
BC $12,2,LR // beqlr
BEQ CR0,LR // beqlr
// Finish out tail with minimal overlapped checking.
// Note, 0 tail is handled by beqlr above.
@@ -215,7 +215,7 @@ cmp32: // 32 - 63B
VCMPEQUDCC V3,V4,V1
BGE CR6,different
BC $12,2,LR // beqlr
BEQ CR0,LR
ADD R9,R10,R10
LXVD2X (R9)(R5),V3
@@ -236,7 +236,7 @@ cmp16: // 16 - 31B
LXVD2X (R0)(R6),V4
VCMPEQUDCC V3,V4,V1
BGE CR6,different
BC $12,2,LR // beqlr
BEQ CR0,LR
LXVD2X (R9)(R5),V3
LXVD2X (R9)(R6),V4

View File

@@ -5,25 +5,11 @@
#include "go_asm.h"
#include "textflag.h"
// memequal(a, b unsafe.Pointer, size uintptr) bool
TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
// short path to handle 0-byte case
CBZ R2, equal
// short path to handle equal pointers
CMP R0, R1
BEQ equal
B memeqbody<>(SB)
equal:
MOVD $1, R0
RET
// memequal_varlen(a, b unsafe.Pointer) bool
TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
CMP R0, R1
BEQ eq
MOVD 8(R26), R2 // compiler stores size at offset 8 in the closure
CBZ R2, eq
B memeqbody<>(SB)
B runtime·memequal<ABIInternal>(SB)
eq:
MOVD $1, R0
RET
@@ -33,7 +19,13 @@ eq:
// R1: pointer b
// R2: data len
// at return: result in R0
TEXT memeqbody<>(SB),NOSPLIT,$0
// memequal(a, b unsafe.Pointer, size uintptr) bool
TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
// short path to handle 0-byte case
CBZ R2, equal
// short path to handle equal pointers
CMP R0, R1
BEQ equal
CMP $1, R2
// handle 1-byte special case for better performance
BEQ one
@@ -91,6 +83,7 @@ tail:
EOR R4, R5
CBNZ R5, not_equal
B equal
PCALIGN $16
lt_8:
TBZ $2, R2, lt_4
MOVWU (R0), R4
@@ -103,6 +96,7 @@ lt_8:
EOR R4, R5
CBNZ R5, not_equal
B equal
PCALIGN $16
lt_4:
TBZ $1, R2, lt_2
MOVHU.P 2(R0), R4

View File

@@ -10,33 +10,14 @@ TEXT ·IndexByte<ABIInternal>(SB),NOSPLIT,$0-40
// X11 = b_len
// X12 = b_cap (unused)
// X13 = byte to find
AND $0xff, X13
MOV X10, X12 // store base for later
ADD X10, X11 // end
SUB $1, X10
loop:
ADD $1, X10
BEQ X10, X11, notfound
MOVBU (X10), X14
BNE X13, X14, loop
SUB X12, X10 // remove base
RET
notfound:
MOV $-1, X10
RET
TEXT ·IndexByteString<ABIInternal>(SB),NOSPLIT,$0-32
// X10 = b_base
// X11 = b_len
// X12 = byte to find
AND $0xff, X12
AND $0xff, X13, X12 // x12 byte to look for
MOV X10, X13 // store base for later
ADD X10, X11 // end
SUB $1, X10
SLTI $24, X11, X14
ADD X10, X11 // end
BEQZ X14, bigBody
SUB $1, X10
loop:
ADD $1, X10
BEQ X10, X11, notfound
@@ -49,3 +30,110 @@ loop:
notfound:
MOV $-1, X10
RET
bigBody:
JMP indexByteBig<>(SB)
TEXT ·IndexByteString<ABIInternal>(SB),NOSPLIT,$0-32
// X10 = b_base
// X11 = b_len
// X12 = byte to find
AND $0xff, X12 // x12 byte to look for
MOV X10, X13 // store base for later
SLTI $24, X11, X14
ADD X10, X11 // end
BEQZ X14, bigBody
SUB $1, X10
loop:
ADD $1, X10
BEQ X10, X11, notfound
MOVBU (X10), X14
BNE X12, X14, loop
SUB X13, X10 // remove base
RET
notfound:
MOV $-1, X10
RET
bigBody:
JMP indexByteBig<>(SB)
TEXT indexByteBig<>(SB),NOSPLIT|NOFRAME,$0
// On entry
// X10 = b_base
// X11 = end
// X12 = byte to find
// X13 = b_base
// X11 is at least 16 bytes > X10
// On exit
// X10 = index of first instance of sought byte, if found, or -1 otherwise
// Process the first few bytes until we get to an 8 byte boundary
// No need to check for end here as we have at least 16 bytes in
// the buffer.
unalignedloop:
AND $7, X10, X14
BEQZ X14, aligned
MOVBU (X10), X14
BEQ X12, X14, found
ADD $1, X10
JMP unalignedloop
aligned:
AND $~7, X11, X15 // X15 = end of aligned data
// We have at least 9 bytes left
// Use 'Determine if a word has a byte equal to n' bit hack from
// https://graphics.stanford.edu/~seander/bithacks.html to determine
// whether the byte is present somewhere in the next 8 bytes of the
// array.
MOV $0x0101010101010101, X16
SLLI $7, X16, X17 // X17 = 0x8080808080808080
MUL X12, X16, X18 // broadcast X12 to every byte in X18
alignedloop:
MOV (X10), X14
XOR X14, X18, X19
// If the LSB in X12 is present somewhere in the 8 bytes we've just
// loaded into X14 then at least one of the bytes in X19 will be 0
// after the XOR. If any of the bytes in X19 are zero then
//
// ((X19 - X16) & (~X19) & X17)
//
// will be non-zero. The expression will evaluate to zero if none of
// the bytes in X19 are zero, i.e., X12 is not present in X14.
SUB X16, X19, X20
ANDN X19, X17, X21
AND X20, X21
BNEZ X21, tailloop // If X21 != 0 X12 is present in X14
ADD $8, X10
BNE X10, X15, alignedloop
tailloop:
SUB $1, X10
loop:
ADD $1, X10
BEQ X10, X11, notfound
MOVBU (X10), X14
BNE X12, X14, loop
found:
SUB X13, X10 // remove base
RET
notfound:
MOV $-1, X10
RET

View File

@@ -6,30 +6,30 @@
// little and big endian integer types from/to byte slices.
package byteorder
func LeUint16(b []byte) uint16 {
func LEUint16(b []byte) uint16 {
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint16(b[0]) | uint16(b[1])<<8
}
func LePutUint16(b []byte, v uint16) {
func LEPutUint16(b []byte, v uint16) {
_ = b[1] // early bounds check to guarantee safety of writes below
b[0] = byte(v)
b[1] = byte(v >> 8)
}
func LeAppendUint16(b []byte, v uint16) []byte {
func LEAppendUint16(b []byte, v uint16) []byte {
return append(b,
byte(v),
byte(v>>8),
)
}
func LeUint32(b []byte) uint32 {
func LEUint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func LePutUint32(b []byte, v uint32) {
func LEPutUint32(b []byte, v uint32) {
_ = b[3] // early bounds check to guarantee safety of writes below
b[0] = byte(v)
b[1] = byte(v >> 8)
@@ -37,7 +37,7 @@ func LePutUint32(b []byte, v uint32) {
b[3] = byte(v >> 24)
}
func LeAppendUint32(b []byte, v uint32) []byte {
func LEAppendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v),
byte(v>>8),
@@ -46,13 +46,13 @@ func LeAppendUint32(b []byte, v uint32) []byte {
)
}
func LeUint64(b []byte) uint64 {
func LEUint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
func LePutUint64(b []byte, v uint64) {
func LEPutUint64(b []byte, v uint64) {
_ = b[7] // early bounds check to guarantee safety of writes below
b[0] = byte(v)
b[1] = byte(v >> 8)
@@ -64,7 +64,7 @@ func LePutUint64(b []byte, v uint64) {
b[7] = byte(v >> 56)
}
func LeAppendUint64(b []byte, v uint64) []byte {
func LEAppendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v),
byte(v>>8),
@@ -77,30 +77,30 @@ func LeAppendUint64(b []byte, v uint64) []byte {
)
}
func BeUint16(b []byte) uint16 {
func BEUint16(b []byte) uint16 {
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint16(b[1]) | uint16(b[0])<<8
}
func BePutUint16(b []byte, v uint16) {
func BEPutUint16(b []byte, v uint16) {
_ = b[1] // early bounds check to guarantee safety of writes below
b[0] = byte(v >> 8)
b[1] = byte(v)
}
func BeAppendUint16(b []byte, v uint16) []byte {
func BEAppendUint16(b []byte, v uint16) []byte {
return append(b,
byte(v>>8),
byte(v),
)
}
func BeUint32(b []byte) uint32 {
func BEUint32(b []byte) uint32 {
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}
func BePutUint32(b []byte, v uint32) {
func BEPutUint32(b []byte, v uint32) {
_ = b[3] // early bounds check to guarantee safety of writes below
b[0] = byte(v >> 24)
b[1] = byte(v >> 16)
@@ -108,7 +108,7 @@ func BePutUint32(b []byte, v uint32) {
b[3] = byte(v)
}
func BeAppendUint32(b []byte, v uint32) []byte {
func BEAppendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v>>24),
byte(v>>16),
@@ -117,13 +117,13 @@ func BeAppendUint32(b []byte, v uint32) []byte {
)
}
func BeUint64(b []byte) uint64 {
func BEUint64(b []byte) uint64 {
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
}
func BePutUint64(b []byte, v uint64) {
func BEPutUint64(b []byte, v uint64) {
_ = b[7] // early bounds check to guarantee safety of writes below
b[0] = byte(v >> 56)
b[1] = byte(v >> 48)
@@ -135,7 +135,7 @@ func BePutUint64(b []byte, v uint64) {
b[7] = byte(v)
}
func BeAppendUint64(b []byte, v uint64) []byte {
func BEAppendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v>>56),
byte(v>>48),

View File

@@ -37,12 +37,14 @@ const KnownEnv = `
GOARCH
GOARM
GOARM64
GOAUTH
GOBIN
GOCACHE
GOCACHEPROG
GOENV
GOEXE
GOEXPERIMENT
GOFIPS140
GOFLAGS
GOGCCFLAGS
GOHOSTARCH

View File

@@ -53,10 +53,10 @@ func (s *State) Next() (uint64, bool) {
// Init seeds the State with the given seed value.
func (s *State) Init(seed [32]byte) {
s.Init64([4]uint64{
byteorder.LeUint64(seed[0*8:]),
byteorder.LeUint64(seed[1*8:]),
byteorder.LeUint64(seed[2*8:]),
byteorder.LeUint64(seed[3*8:]),
byteorder.LEUint64(seed[0*8:]),
byteorder.LEUint64(seed[1*8:]),
byteorder.LEUint64(seed[2*8:]),
byteorder.LEUint64(seed[3*8:]),
})
}
@@ -124,9 +124,9 @@ func Marshal(s *State) []byte {
data := make([]byte, 6*8)
copy(data, "chacha8:")
used := (s.c/ctrInc)*chunk + s.i
byteorder.BePutUint64(data[1*8:], uint64(used))
byteorder.BEPutUint64(data[1*8:], uint64(used))
for i, seed := range s.seed {
byteorder.LePutUint64(data[(2+i)*8:], seed)
byteorder.LEPutUint64(data[(2+i)*8:], seed)
}
return data
}
@@ -142,12 +142,12 @@ func Unmarshal(s *State, data []byte) error {
if len(data) != 6*8 || string(data[:8]) != "chacha8:" {
return new(errUnmarshalChaCha8)
}
used := byteorder.BeUint64(data[1*8:])
used := byteorder.BEUint64(data[1*8:])
if used > (ctrMax/ctrInc)*chunk-reseed {
return new(errUnmarshalChaCha8)
}
for i := range s.seed {
s.seed[i] = byteorder.LeUint64(data[(2+i)*8:])
s.seed[i] = byteorder.LEUint64(data[(2+i)*8:])
}
s.c = ctrInc * (uint32(used) / chunk)
block(&s.seed, &s.buf, s.c)

View File

@@ -1,408 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package concurrent
import (
"internal/abi"
"internal/goarch"
"math/rand/v2"
"sync"
"sync/atomic"
"unsafe"
)
// HashTrieMap is an implementation of a concurrent hash-trie. The implementation
// is designed around frequent loads, but offers decent performance for stores
// and deletes as well, especially if the map is larger. It's primary use-case is
// the unique package, but can be used elsewhere as well.
type HashTrieMap[K, V comparable] struct {
root *indirect[K, V] // root indirect node of the trie
keyHash hashFunc // key hasher, taken from the runtime map type descriptor
keyEqual equalFunc // key equality, from the map type descriptor
valEqual equalFunc // value equality, from the map type descriptor
seed uintptr // per-map hash seed, randomized in NewHashTrieMap
}
// NewHashTrieMap creates a new HashTrieMap for the provided key and value.
func NewHashTrieMap[K, V comparable]() *HashTrieMap[K, V] {
// Reflect on a throwaway map[K]V to borrow the runtime's hash and
// equality functions for K and V from its map type descriptor.
var m map[K]V
mapType := abi.TypeOf(m).MapType()
ht := &HashTrieMap[K, V]{
root: newIndirectNode[K, V](nil),
keyHash: mapType.Hasher,
keyEqual: mapType.Key.Equal,
valEqual: mapType.Elem.Equal,
// Random seed so hash distributions differ between map instances.
seed: uintptr(rand.Uint64()),
}
return ht
}
// hashFunc hashes the pointed-to key with the given seed; it has the
// runtime map hasher signature (see SwissMapType/OldMapType.Hasher).
type hashFunc func(unsafe.Pointer, uintptr) uintptr

// equalFunc reports whether the two pointed-to values are equal; it has
// the runtime Type.Equal signature.
type equalFunc func(unsafe.Pointer, unsafe.Pointer) bool
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
//
// Load is lock-free: it walks the trie with atomic child loads only.
func (ht *HashTrieMap[K, V]) Load(key K) (value V, ok bool) {
hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
i := ht.root
// Consume the hash nChildrenLog2 bits at a time, starting from the
// most-significant end; each chunk selects a child at this level.
hashShift := 8 * goarch.PtrSize
for hashShift != 0 {
hashShift -= nChildrenLog2
n := i.children[(hash>>hashShift)&nChildrenMask].Load()
if n == nil {
// Empty slot: the key is not in the map.
return *new(V), false
}
if n.isEntry {
// Leaf entry: resolve possible collisions via keyEqual.
return n.entry().lookup(key, ht.keyEqual)
}
i = n.indirect()
}
panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
// LoadOrStore optimistically searches lock-free for the key or an empty
// slot, then takes the owning indirect node's lock and re-validates
// before inserting; on a losing race it retries from the root.
func (ht *HashTrieMap[K, V]) LoadOrStore(key K, value V) (result V, loaded bool) {
hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
var i *indirect[K, V]
var hashShift uint
var slot *atomic.Pointer[node[K, V]]
var n *node[K, V]
for {
// Find the key or a candidate location for insertion.
i = ht.root
hashShift = 8 * goarch.PtrSize
haveInsertPoint := false
for hashShift != 0 {
hashShift -= nChildrenLog2
slot = &i.children[(hash>>hashShift)&nChildrenMask]
n = slot.Load()
if n == nil {
// We found a nil slot which is a candidate for insertion.
haveInsertPoint = true
break
}
if n.isEntry {
// We found an existing entry, which is as far as we can go.
// If it stays this way, we'll have to replace it with an
// indirect node.
if v, ok := n.entry().lookup(key, ht.keyEqual); ok {
return v, true
}
haveInsertPoint = true
break
}
i = n.indirect()
}
if !haveInsertPoint {
panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
}
// Grab the lock and double-check what we saw.
// i.dead guards against inserting under a node that a concurrent
// delete already unlinked from the trie.
i.mu.Lock()
n = slot.Load()
if (n == nil || n.isEntry) && !i.dead.Load() {
// What we saw is still true, so we can continue with the insert.
break
}
// We have to start over.
i.mu.Unlock()
}
// N.B. This lock is held from when we broke out of the outer loop above.
// We specifically break this out so that we can use defer here safely.
// One option is to break this out into a new function instead, but
// there's so much local iteration state used below that this turns out
// to be cleaner.
defer i.mu.Unlock()
var oldEntry *entry[K, V]
if n != nil {
oldEntry = n.entry()
if v, ok := oldEntry.lookup(key, ht.keyEqual); ok {
// Easy case: by loading again, it turns out exactly what we wanted is here!
return v, true
}
}
newEntry := newEntryNode(key, value)
if oldEntry == nil {
// Easy case: create a new entry and store it.
slot.Store(&newEntry.node)
} else {
// We possibly need to expand the entry already there into one or more new nodes.
//
// Publish the node last, which will make both oldEntry and newEntry visible. We
// don't want readers to be able to observe that oldEntry isn't in the tree.
slot.Store(ht.expand(oldEntry, newEntry, hash, hashShift, i))
}
return value, false
}
// expand takes oldEntry and newEntry whose hashes conflict from bit 64 down to hashShift and
// produces a subtree of indirect nodes to hold the two new entries.
//
// expand is called with parent's lock held (see LoadOrStore, which holds i.mu
// across the call); the caller publishes the returned node into the trie.
func (ht *HashTrieMap[K, V]) expand(oldEntry, newEntry *entry[K, V], newHash uintptr, hashShift uint, parent *indirect[K, V]) *node[K, V] {
	// Check for a hash collision.
	oldHash := ht.keyHash(unsafe.Pointer(&oldEntry.key), ht.seed)
	if oldHash == newHash {
		// Store the old entry in the new entry's overflow list, then store
		// the new entry.
		newEntry.overflow.Store(oldEntry)
		return &newEntry.node
	}
	// We have to add an indirect node. Worse still, we may need to add more than one.
	// Keep descending one level at a time until the two hashes diverge.
	newIndirect := newIndirectNode(parent)
	top := newIndirect
	for {
		if hashShift == 0 {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while inserting")
		}
		hashShift -= nChildrenLog2 // hashShift is for the level parent is at. We need to go deeper.
		oi := (oldHash >> hashShift) & nChildrenMask
		ni := (newHash >> hashShift) & nChildrenMask
		if oi != ni {
			// The hashes finally differ at this level: place both entries.
			newIndirect.children[oi].Store(&oldEntry.node)
			newIndirect.children[ni].Store(&newEntry.node)
			break
		}
		// Same child index again: add another indirect level and descend.
		nextIndirect := newIndirectNode(newIndirect)
		newIndirect.children[oi].Store(&nextIndirect.node)
		newIndirect = nextIndirect
	}
	return &top.node
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
//
// If there is no current value for key in the map, CompareAndDelete returns false
// (even if the old value is the nil interface value).
func (ht *HashTrieMap[K, V]) CompareAndDelete(key K, old V) (deleted bool) {
	hash := ht.keyHash(abi.NoEscape(unsafe.Pointer(&key)), ht.seed)
	var i *indirect[K, V]
	var hashShift uint
	var slot *atomic.Pointer[node[K, V]]
	var n *node[K, V]
	for {
		// Find the key or return when there's nothing to delete.
		i = ht.root
		hashShift = 8 * goarch.PtrSize
		found := false
		for hashShift != 0 {
			hashShift -= nChildrenLog2
			slot = &i.children[(hash>>hashShift)&nChildrenMask]
			n = slot.Load()
			if n == nil {
				// Nothing to delete. Give up.
				return
			}
			if n.isEntry {
				// We found an entry. Check if it matches.
				if _, ok := n.entry().lookup(key, ht.keyEqual); !ok {
					// No match, nothing to delete.
					return
				}
				// We've got something to delete.
				found = true
				break
			}
			i = n.indirect()
		}
		if !found {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}
		// Grab the lock and double-check what we saw.
		i.mu.Lock()
		n = slot.Load()
		if !i.dead.Load() {
			if n == nil {
				// Valid node that doesn't contain what we need. Nothing to delete.
				i.mu.Unlock()
				return
			}
			if n.isEntry {
				// What we saw is still true, so we can continue with the delete.
				break
			}
		}
		// We have to start over.
		i.mu.Unlock()
	}
	// Try to delete the entry.
	// N.B. i.mu is still held here from the break in the loop above.
	e, deleted := n.entry().compareAndDelete(key, old, ht.keyEqual, ht.valEqual)
	if !deleted {
		// Nothing was actually deleted, which means the node is no longer there.
		i.mu.Unlock()
		return false
	}
	if e != nil {
		// We didn't actually delete the whole entry, just one entry in the chain.
		// Nothing else to do, since the parent is definitely not empty.
		slot.Store(&e.node)
		i.mu.Unlock()
		return true
	}
	// Delete the entry.
	slot.Store(nil)
	// Check if the node is now empty (and isn't the root), and delete it if able.
	// Walk back up the trie, pruning indirect nodes that became empty; dead is
	// set while holding both i.mu and parent.mu so concurrent inserters notice.
	for i.parent != nil && i.empty() {
		if hashShift == 8*goarch.PtrSize {
			panic("internal/concurrent.HashMapTrie: ran out of hash bits while iterating")
		}
		hashShift += nChildrenLog2
		// Delete the current node in the parent.
		parent := i.parent
		parent.mu.Lock()
		i.dead.Store(true)
		parent.children[(hash>>hashShift)&nChildrenMask].Store(nil)
		i.mu.Unlock()
		i = parent
	}
	i.mu.Unlock()
	return true
}
// All returns an iter.Seq2 that produces all key-value pairs in the map.
// The enumeration does not represent any consistent snapshot of the map,
// but is guaranteed to visit each unique key-value pair only once. It is
// safe to operate on the tree during iteration. No particular enumeration
// order is guaranteed.
func (ht *HashTrieMap[K, V]) All() func(yield func(K, V) bool) {
	walk := func(yield func(K, V) bool) {
		ht.iter(ht.root, yield)
	}
	return walk
}
// iter recursively yields every key-value pair reachable from the indirect
// node i. It returns false as soon as yield returns false, true otherwise.
func (ht *HashTrieMap[K, V]) iter(i *indirect[K, V], yield func(key K, value V) bool) bool {
	for idx := range i.children {
		child := i.children[idx].Load()
		if child == nil {
			continue
		}
		if child.isEntry {
			// Leaf: yield the entry and its whole overflow chain.
			for e := child.entry(); e != nil; e = e.overflow.Load() {
				if !yield(e.key, e.value) {
					return false
				}
			}
			continue
		}
		// Interior node: recurse.
		if !ht.iter(child.indirect(), yield) {
			return false
		}
	}
	return true
}
// Fan-out parameters for the trie. Each indirect node consumes nChildrenLog2
// bits of the hash per level.
const (
	// 16 children. This seems to be the sweet spot for
	// load performance: any smaller and we lose out on
	// 50% or more in CPU performance. Any larger and the
	// returns are minuscule (~1% improvement for 32 children).
	nChildrenLog2 = 4
	nChildren     = 1 << nChildrenLog2
	nChildrenMask = nChildren - 1
)
// indirect is an internal node in the hash-trie.
type indirect[K, V comparable] struct {
	node[K, V]
	dead     atomic.Bool // set (under mu) when this node has been unlinked from its parent
	mu       sync.Mutex  // Protects mutation to children and any children that are entry nodes.
	parent   *indirect[K, V]
	children [nChildren]atomic.Pointer[node[K, V]]
}
// newIndirectNode returns a fresh, empty indirect node linked to the given
// parent (nil for the root).
func newIndirectNode[K, V comparable](parent *indirect[K, V]) *indirect[K, V] {
	// The zero value already has isEntry == false and nil children.
	n := new(indirect[K, V])
	n.parent = parent
	return n
}
// empty reports whether the indirect node has no children.
//
// The original counted every non-nil child before comparing against zero;
// returning as soon as one non-nil child is seen gives the same answer while
// skipping up to nChildren-1 atomic loads.
func (i *indirect[K, V]) empty() bool {
	for j := range i.children {
		if i.children[j].Load() != nil {
			return false
		}
	}
	return true
}
// entry is a leaf node in the hash-trie.
type entry[K, V comparable] struct {
	node[K, V]
	overflow atomic.Pointer[entry[K, V]] // Overflow for hash collisions (singly-linked chain).
	key      K
	value    V
}
// newEntryNode returns a new leaf entry holding the given key-value pair.
func newEntryNode[K, V comparable](key K, value V) *entry[K, V] {
	e := &entry[K, V]{key: key, value: value}
	e.isEntry = true // mark the embedded node header as a leaf
	return e
}
// lookup scans the entry and its overflow chain for key, returning the
// associated value and whether it was found.
func (e *entry[K, V]) lookup(key K, equal equalFunc) (V, bool) {
	for cur := e; cur != nil; cur = cur.overflow.Load() {
		if equal(unsafe.Pointer(&cur.key), abi.NoEscape(unsafe.Pointer(&key))) {
			return cur.value, true
		}
	}
	var zero V
	return zero, false
}
// compareAndDelete deletes an entry in the overflow chain if both the key and value compare
// equal. Returns the new entry chain and whether or not anything was deleted.
//
// compareAndDelete must be called under the mutex of the indirect node which e is a child of.
func (head *entry[K, V]) compareAndDelete(key K, value V, keyEqual, valEqual equalFunc) (*entry[K, V], bool) {
	if keyEqual(unsafe.Pointer(&head.key), abi.NoEscape(unsafe.Pointer(&key))) &&
		valEqual(unsafe.Pointer(&head.value), abi.NoEscape(unsafe.Pointer(&value))) {
		// Drop the head of the list.
		return head.overflow.Load(), true
	}
	// Walk the chain; i always points at the link that leads to e, so a match
	// is unlinked by redirecting that link past e.
	i := &head.overflow
	e := i.Load()
	for e != nil {
		if keyEqual(unsafe.Pointer(&e.key), abi.NoEscape(unsafe.Pointer(&key))) &&
			valEqual(unsafe.Pointer(&e.value), abi.NoEscape(unsafe.Pointer(&value))) {
			// Unlink e from the chain.
			i.Store(e.overflow.Load())
			return head, true
		}
		i = &e.overflow
		e = e.overflow.Load()
	}
	// No matching key/value pair anywhere in the chain.
	return head, false
}
// node is the header for a node. It's polymorphic and
// is actually either an entry or an indirect.
type node[K, V comparable] struct {
	isEntry bool // discriminates entry (true) from indirect (false)
}
// entry downcasts the node header to its containing entry. Panics if the
// node is actually an indirect node.
func (n *node[K, V]) entry() *entry[K, V] {
	if n.isEntry {
		return (*entry[K, V])(unsafe.Pointer(n))
	}
	panic("called entry on non-entry node")
}
// indirect downcasts the node header to its containing indirect node. Panics
// if the node is actually an entry.
func (n *node[K, V]) indirect() *indirect[K, V] {
	if !n.isEntry {
		return (*indirect[K, V])(unsafe.Pointer(n))
	}
	panic("called indirect on entry node")
}

View File

@@ -1,371 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package concurrent
import (
"fmt"
"math"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"unsafe"
)
func TestHashTrieMap(t *testing.T) {
testHashTrieMap(t, func() *HashTrieMap[string, int] {
return NewHashTrieMap[string, int]()
})
}
// TestHashTrieMapBadHash runs the shared test suite with a pathological hash
// function that sends every key to the same chain; correctness must not
// depend on hash quality.
func TestHashTrieMapBadHash(t *testing.T) {
	testHashTrieMap(t, func() *HashTrieMap[string, int] {
		m := NewHashTrieMap[string, int]()
		// Stub out the good hash function with a terrible one.
		// Everything should still work as expected.
		m.keyHash = func(_ unsafe.Pointer, _ uintptr) uintptr { return 0 }
		return m
	})
}
// testHashTrieMap exercises a HashTrieMap produced by newMap through its whole
// API: loads, stores, deletes, iteration, and concurrent use.
func testHashTrieMap(t *testing.T, newMap func() *HashTrieMap[string, int]) {
	// A fresh map must miss on every key.
	t.Run("LoadEmpty", func(t *testing.T) {
		m := newMap()
		for _, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
		}
	})
	// First LoadOrStore stores; subsequent ones load the original value.
	t.Run("LoadOrStore", func(t *testing.T) {
		m := newMap()
		for i, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
			expectStored(t, s, i)(m.LoadOrStore(s, i))
			expectPresent(t, s, i)(m.Load(s))
			expectLoaded(t, s, i)(m.LoadOrStore(s, 0))
		}
		for i, s := range testData {
			expectPresent(t, s, i)(m.Load(s))
			expectLoaded(t, s, i)(m.LoadOrStore(s, 0))
		}
	})
	// Fill and fully drain the map three times; exercises trie growth and pruning.
	t.Run("CompareAndDeleteAll", func(t *testing.T) {
		m := newMap()
		for range 3 {
			for i, s := range testData {
				expectMissing(t, s, 0)(m.Load(s))
				expectStored(t, s, i)(m.LoadOrStore(s, i))
				expectPresent(t, s, i)(m.Load(s))
				expectLoaded(t, s, i)(m.LoadOrStore(s, 0))
			}
			for i, s := range testData {
				expectPresent(t, s, i)(m.Load(s))
				expectNotDeleted(t, s, math.MaxInt)(m.CompareAndDelete(s, math.MaxInt))
				expectDeleted(t, s, i)(m.CompareAndDelete(s, i))
				expectNotDeleted(t, s, i)(m.CompareAndDelete(s, i))
				expectMissing(t, s, 0)(m.Load(s))
			}
			for _, s := range testData {
				expectMissing(t, s, 0)(m.Load(s))
			}
		}
	})
	// Deleting a single key must not disturb any other key.
	t.Run("CompareAndDeleteOne", func(t *testing.T) {
		m := newMap()
		for i, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
			expectStored(t, s, i)(m.LoadOrStore(s, i))
			expectPresent(t, s, i)(m.Load(s))
			expectLoaded(t, s, i)(m.LoadOrStore(s, 0))
		}
		expectNotDeleted(t, testData[15], math.MaxInt)(m.CompareAndDelete(testData[15], math.MaxInt))
		expectDeleted(t, testData[15], 15)(m.CompareAndDelete(testData[15], 15))
		expectNotDeleted(t, testData[15], 15)(m.CompareAndDelete(testData[15], 15))
		for i, s := range testData {
			if i == 15 {
				expectMissing(t, s, 0)(m.Load(s))
			} else {
				expectPresent(t, s, i)(m.Load(s))
			}
		}
	})
	// Deleting a scattered subset must not disturb the remaining keys.
	t.Run("DeleteMultiple", func(t *testing.T) {
		m := newMap()
		for i, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
			expectStored(t, s, i)(m.LoadOrStore(s, i))
			expectPresent(t, s, i)(m.Load(s))
			expectLoaded(t, s, i)(m.LoadOrStore(s, 0))
		}
		for _, i := range []int{1, 105, 6, 85} {
			expectNotDeleted(t, testData[i], math.MaxInt)(m.CompareAndDelete(testData[i], math.MaxInt))
			expectDeleted(t, testData[i], i)(m.CompareAndDelete(testData[i], i))
			expectNotDeleted(t, testData[i], i)(m.CompareAndDelete(testData[i], i))
		}
		for i, s := range testData {
			if i == 1 || i == 105 || i == 6 || i == 85 {
				expectMissing(t, s, 0)(m.Load(s))
			} else {
				expectPresent(t, s, i)(m.Load(s))
			}
		}
	})
	// Iteration visits every stored pair exactly once.
	t.Run("All", func(t *testing.T) {
		m := newMap()
		testAll(t, m, testDataMap(testData[:]), func(_ string, _ int) bool {
			return true
		})
	})
	// Deleting entries while iterating is permitted.
	t.Run("AllDelete", func(t *testing.T) {
		m := newMap()
		testAll(t, m, testDataMap(testData[:]), func(s string, i int) bool {
			expectDeleted(t, s, i)(m.CompareAndDelete(s, i))
			return true
		})
		for _, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
		}
	})
	// GOMAXPROCS goroutines do full store/load/delete lifecycles on disjoint
	// key sets (each goroutine suffixes keys with its own id).
	t.Run("ConcurrentLifecycleUnsharedKeys", func(t *testing.T) {
		m := newMap()
		gmp := runtime.GOMAXPROCS(-1)
		var wg sync.WaitGroup
		for i := range gmp {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				makeKey := func(s string) string {
					return s + "-" + strconv.Itoa(id)
				}
				for _, s := range testData {
					key := makeKey(s)
					expectMissing(t, key, 0)(m.Load(key))
					expectStored(t, key, id)(m.LoadOrStore(key, id))
					expectPresent(t, key, id)(m.Load(key))
					expectLoaded(t, key, id)(m.LoadOrStore(key, 0))
				}
				for _, s := range testData {
					key := makeKey(s)
					expectPresent(t, key, id)(m.Load(key))
					expectDeleted(t, key, id)(m.CompareAndDelete(key, id))
					expectMissing(t, key, 0)(m.Load(key))
				}
				for _, s := range testData {
					key := makeKey(s)
					expectMissing(t, key, 0)(m.Load(key))
				}
			}(i)
		}
		wg.Wait()
	})
	// GOMAXPROCS goroutines race to delete the same keys; each delete may
	// succeed in any one goroutine, but afterwards the key must be gone.
	t.Run("ConcurrentDeleteSharedKeys", func(t *testing.T) {
		m := newMap()
		// Load up the map.
		for i, s := range testData {
			expectMissing(t, s, 0)(m.Load(s))
			expectStored(t, s, i)(m.LoadOrStore(s, i))
		}
		gmp := runtime.GOMAXPROCS(-1)
		var wg sync.WaitGroup
		for i := range gmp {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				for i, s := range testData {
					expectNotDeleted(t, s, math.MaxInt)(m.CompareAndDelete(s, math.MaxInt))
					m.CompareAndDelete(s, i)
					expectMissing(t, s, 0)(m.Load(s))
				}
				for _, s := range testData {
					expectMissing(t, s, 0)(m.Load(s))
				}
			}(i)
		}
		wg.Wait()
	})
}
// testAll stores every pair of testData into m, iterates with m.All checking
// that each visited pair matches testData, forwards each pair to yield (which
// may stop iteration or mutate m), and finally verifies no key was visited twice.
func testAll[K, V comparable](t *testing.T, m *HashTrieMap[K, V], testData map[K]V, yield func(K, V) bool) {
	for k, v := range testData {
		expectStored(t, k, v)(m.LoadOrStore(k, v))
	}
	visited := make(map[K]int)
	m.All()(func(key K, got V) bool {
		want, ok := testData[key]
		if !ok {
			t.Errorf("unexpected key %v in map", key)
			return false
		}
		if got != want {
			t.Errorf("expected key %v to have value %v, got %v", key, want, got)
			return false
		}
		visited[key]++
		return yield(key, got)
	})
	for key, n := range visited {
		if n > 1 {
			t.Errorf("visited key %v more than once", key)
		}
	}
}
// expectPresent returns a checker asserting that a Load-style result found
// key with value want.
func expectPresent[K, V comparable](t *testing.T, key K, want V) func(got V, ok bool) {
	t.Helper()
	return func(got V, ok bool) {
		t.Helper()
		switch {
		case !ok:
			t.Errorf("expected key %v to be present in map", key)
		case got != want:
			t.Errorf("expected key %v to have value %v, got %v", key, want, got)
		}
	}
}
// expectMissing returns a checker asserting that a Load-style result missed
// key and reported the zero value. want must be the zero value of V.
func expectMissing[K, V comparable](t *testing.T, key K, want V) func(got V, ok bool) {
	t.Helper()
	var zero V
	if want != zero {
		// This is awkward, but the want argument is necessary to smooth over type inference.
		// Just make sure the want argument always looks the same.
		panic("expectMissing must always have a zero value variable")
	}
	return func(got V, ok bool) {
		t.Helper()
		switch {
		case ok:
			t.Errorf("expected key %v to be missing from map, got value %v", key, got)
		case got != want:
			t.Errorf("expected missing key %v to be paired with the zero value; got %v", key, got)
		}
	}
}
// expectLoaded returns a checker asserting that a LoadOrStore-style result
// loaded an existing value want for key (rather than storing a new one).
func expectLoaded[K, V comparable](t *testing.T, key K, want V) func(got V, loaded bool) {
	t.Helper()
	return func(got V, loaded bool) {
		t.Helper()
		if !loaded {
			t.Errorf("expected key %v to have been loaded, not stored", key)
		}
		if want != got {
			t.Errorf("expected key %v to have value %v, got %v", key, want, got)
		}
	}
}
// expectStored returns a checker asserting that a LoadOrStore-style result
// stored the new value want for key (rather than loading an existing one).
func expectStored[K, V comparable](t *testing.T, key K, want V) func(got V, loaded bool) {
	t.Helper()
	return func(got V, loaded bool) {
		t.Helper()
		if loaded {
			t.Errorf("expected inserted key %v to have been stored, not loaded", key)
		}
		if want != got {
			t.Errorf("expected inserted key %v to have value %v, got %v", key, want, got)
		}
	}
}
// expectDeleted returns a checker asserting that a CompareAndDelete-style
// result reported a successful deletion of key with old value old.
func expectDeleted[K, V comparable](t *testing.T, key K, old V) func(deleted bool) {
	t.Helper()
	return func(deleted bool) {
		t.Helper()
		if deleted {
			return
		}
		t.Errorf("expected key %v with value %v to be in map and deleted", key, old)
	}
}
// expectNotDeleted returns a checker asserting that a CompareAndDelete-style
// result reported no deletion for key with old value old.
func expectNotDeleted[K, V comparable](t *testing.T, key K, old V) func(deleted bool) {
	t.Helper()
	return func(deleted bool) {
		t.Helper()
		if !deleted {
			return
		}
		t.Errorf("expected key %v with value %v to not be in map and thus not deleted", key, old)
	}
}
// testDataMap maps each key in data to its index, mirroring how the test
// suite stores testData into a HashTrieMap.
//
// Pre-sizes the map to len(data) to avoid incremental growth.
func testDataMap(data []string) map[string]int {
	m := make(map[string]int, len(data))
	for i, s := range data {
		m[s] = i
	}
	return m
}
// Test key corpora of increasing size. Each element is the binary string form
// of its index (populated by init), so every key is unique.
var (
	testDataSmall [8]string
	testData      [128]string
	testDataLarge [128 << 10]string
)
// init fills the test key corpora with the binary representation of each
// index (e.g. index 5 becomes "101").
func init() {
	for i := range testDataSmall {
		testDataSmall[i] = fmt.Sprintf("%b", i)
	}
	for i := range testData {
		testData[i] = fmt.Sprintf("%b", i)
	}
	for i := range testDataLarge {
		testDataLarge[i] = fmt.Sprintf("%b", i)
	}
}
// dumpMap prints the entire trie of ht to stdout for debugging.
func dumpMap[K, V comparable](ht *HashTrieMap[K, V]) {
	dumpNode(ht, &ht.root.node, 0)
}
// dumpNode prints the subtree rooted at n to stdout for debugging, one node
// per line, indented by depth tabs.
func dumpNode[K, V comparable](ht *HashTrieMap[K, V], n *node[K, V], depth int) {
	var sb strings.Builder
	for range depth {
		fmt.Fprintf(&sb, "\t")
	}
	prefix := sb.String()
	if n.isEntry {
		// Leaf: print the entry and its entire overflow chain.
		e := n.entry()
		for e != nil {
			fmt.Printf("%s%p [Entry Key=%v Value=%v Overflow=%p, Hash=%016x]\n", prefix, e, e.key, e.value, e.overflow.Load(), ht.keyHash(unsafe.Pointer(&e.key), ht.seed))
			e = e.overflow.Load()
		}
		return
	}
	// Interior node: print its header and child pointers, then recurse.
	i := n.indirect()
	fmt.Printf("%s%p [Indirect Parent=%p Dead=%t Children=[", prefix, i, i.parent, i.dead.Load())
	for j := range i.children {
		c := i.children[j].Load()
		fmt.Printf("%p", c)
		if j != len(i.children)-1 {
			fmt.Printf(", ")
		}
	}
	fmt.Printf("]]\n")
	for j := range i.children {
		c := i.children[j].Load()
		if c != nil {
			dumpNode(ht, c, depth+1)
		}
	}
}

View File

@@ -0,0 +1,70 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package copyright
import (
"bytes"
"internal/testenv"
"io"
"io/fs"
"os"
"path/filepath"
"testing"
)
// copyright is the marker string every checked file is expected to contain
// somewhere in its first 2048 bytes.
var copyright = []byte("Copyright")

// permitted lists file-prefix patterns (mostly generated-code markers) that
// are allowed to omit the copyright notice.
var permitted = [][]byte{
	[]byte("// Code generated by "),
	[]byte("// Code generated from "),
	[]byte("// Created by cgo -cdefs"),
	[]byte("// DO NOT EDIT\n// generated by:"),
	[]byte("// Empty assembly file"),
	[]byte("// Generated using cgo"),
	[]byte("// Original source:\n//\thttp://www.zorinaq.com/papers/md5-amd64.html"), // public domain crypto/md5
	[]byte("// created by cgo -cdefs"),
	[]byte("// go run mkasm.go"),
	[]byte("// mkerrors"),
	[]byte("// mksys"),
	[]byte("// run\n// Code generated by"), // cmd/compile/internal/test/constFold_test.go
}
// TestCopyright walks every .go and .s file under GOROOT/src (skipping
// testdata and vendor trees) and checks that its first 2048 bytes contain a
// copyright notice, or that the file starts with a permitted generated-code
// prefix.
func TestCopyright(t *testing.T) {
	buf := make([]byte, 2048)
	filepath.WalkDir(filepath.Join(testenv.GOROOT(t), "src"), func(path string, d fs.DirEntry, err error) error {
		// Report walk errors (previously silently dropped) but keep going.
		if err != nil {
			t.Error(err)
			return nil
		}
		if d.IsDir() && (d.Name() == "testdata" || d.Name() == "vendor") {
			return filepath.SkipDir
		}
		switch filepath.Ext(d.Name()) {
		default:
			return nil
		case ".s", ".go":
			// check
		}
		f, err := os.Open(path)
		if err != nil {
			t.Error(err)
			return nil
		}
		n, err := f.Read(buf)
		// Close eagerly: a defer here would hold one file descriptor open per
		// visited file until the entire walk finishes.
		f.Close()
		if err != nil && err != io.EOF {
			t.Error(err)
			return nil
		}
		b := buf[:n]
		if bytes.Contains(b, copyright) {
			return nil
		}
		for _, ok := range permitted {
			if bytes.HasPrefix(b, ok) {
				return nil
			}
		}
		t.Errorf("%s: missing copyright notice", path)
		return nil
	})
}

View File

@@ -9,8 +9,8 @@
package cfile
import (
"crypto/md5"
"fmt"
"hash/fnv"
"internal/coverage"
"internal/coverage/encodecounter"
"internal/coverage/encodemeta"
@@ -206,7 +206,7 @@ func prepareForMetaEmit() ([]rtcov.CovMetaBlob, error) {
}
}
h := md5.New()
h := fnv.New128a()
tlen := uint64(unsafe.Sizeof(coverage.MetaFileHeader{}))
for _, entry := range ml {
if _, err := h.Write(entry.Hash[:]); err != nil {

View File

@@ -109,7 +109,7 @@ func ProcessCoverTestDir(dir string, cfile string, cm string, cpkg string, w io.
// Emit text output.
if tf != nil {
if err := ts.cf.EmitTextual(tf); err != nil {
if err := ts.cf.EmitTextual(selpkgs, tf); err != nil {
return err
}
tfClosed = true

View File

@@ -47,8 +47,8 @@ func TestBasics(t *testing.T) {
fm.AddUnit("lit.go", "f3", true, u, 0)
}
var b1, b2, b3, b4 strings.Builder
if err := fm.EmitTextual(&b1); err != nil {
var b1, b2, b3, b4, b5 strings.Builder
if err := fm.EmitTextual(nil, &b1); err != nil {
t.Fatalf("EmitTextual returned %v", err)
}
wantText := strings.TrimSpace(`
@@ -64,6 +64,18 @@ lit.go:99.0,100.0 1 0`)
t.Errorf("emit text: got:\n%s\nwant:\n%s\n", gotText, wantText)
}
selected := []string{"my/pack2"}
if err := fm.EmitTextual(selected, &b5); err != nil {
t.Fatalf("EmitTextual returned %v", err)
}
wantText = strings.TrimSpace(`
mode: atomic
lit.go:99.0,100.0 1 0`)
gotText = strings.TrimSpace(b5.String())
if wantText != gotText {
t.Errorf("emit text: got:\n%s\nwant:\n%s\n", gotText, wantText)
}
// Percent output with no aggregation.
noCoverPkg := ""
if err := fm.EmitPercent(&b2, nil, noCoverPkg, false, false); err != nil {

View File

@@ -24,7 +24,7 @@ package cformat
// }
// }
// myformatter.EmitPercent(os.Stdout, nil, "", true, true)
// myformatter.EmitTextual(somefile)
// myformatter.EmitTextual(nil, somefile)
//
// These apis are linked into tests that are built with "-cover", and
// called at the end of test execution to produce text output or
@@ -36,7 +36,9 @@ import (
"internal/coverage"
"internal/coverage/cmerge"
"io"
"maps"
"slices"
"sort"
"strings"
"text/tabwriter"
)
@@ -162,25 +164,31 @@ func (p *pstate) sortUnits(units []extcu) {
})
}
// EmitTextual writes the accumulated coverage data in the legacy
// cmd/cover text format to the writer 'w'. We sort the data items by
// EmitTextual writes the accumulated coverage data for 'pkgs' in the legacy
// cmd/cover text format to the writer 'w'; if pkgs is empty, text output
// is emitted for all packages recorded. We sort the data items by
// importpath, source file, and line number before emitting (this sorting
// is not explicitly mandated by the format, but seems like a good idea
// for repeatable/deterministic dumps).
func (fm *Formatter) EmitTextual(w io.Writer) error {
func (fm *Formatter) EmitTextual(pkgs []string, w io.Writer) error {
if fm.cm == coverage.CtrModeInvalid {
panic("internal error, counter mode unset")
}
if len(pkgs) == 0 {
pkgs = make([]string, 0, len(fm.pm))
for importpath := range fm.pm {
pkgs = append(pkgs, importpath)
}
}
if _, err := fmt.Fprintf(w, "mode: %s\n", fm.cm.String()); err != nil {
return err
}
pkgs := make([]string, 0, len(fm.pm))
for importpath := range fm.pm {
pkgs = append(pkgs, importpath)
}
slices.Sort(pkgs)
sort.Strings(pkgs)
for _, importpath := range pkgs {
p := fm.pm[importpath]
if p == nil {
continue
}
units := make([]extcu, 0, len(p.unitTable))
for u := range p.unitTable {
units = append(units, u)
@@ -281,14 +289,8 @@ func (fm *Formatter) EmitFuncs(w io.Writer) error {
allStmts := uint64(0)
covStmts := uint64(0)
pkgs := make([]string, 0, len(fm.pm))
for importpath := range fm.pm {
pkgs = append(pkgs, importpath)
}
slices.Sort(pkgs)
// Emit functions for each package, sorted by import path.
for _, importpath := range pkgs {
for _, importpath := range slices.Sorted(maps.Keys(fm.pm)) {
p := fm.pm[importpath]
if len(p.unitTable) == 0 {
continue

View File

@@ -12,9 +12,9 @@ package decodemeta
import (
"bufio"
"crypto/md5"
"encoding/binary"
"fmt"
"hash/fnv"
"internal/coverage"
"internal/coverage/slicereader"
"internal/coverage/stringtab"
@@ -171,8 +171,10 @@ func (r *CoverageMetaFileReader) FileHash() [16]byte {
func (r *CoverageMetaFileReader) GetPackageDecoder(pkIdx uint32, payloadbuf []byte) (*CoverageMetaDataDecoder, []byte, error) {
pp, err := r.GetPackagePayload(pkIdx, payloadbuf)
if r.debug {
h := fnv.New128a()
h.Write(pp)
fmt.Fprintf(os.Stderr, "=-= pkidx=%d payload length is %d hash=%s\n",
pkIdx, len(pp), fmt.Sprintf("%x", md5.Sum(pp)))
pkIdx, len(pp), fmt.Sprintf("%x", h.Sum(nil)))
}
if err != nil {
return nil, nil, err

View File

@@ -13,6 +13,7 @@ import (
"internal/coverage/stringtab"
"internal/coverage/uleb128"
"io"
"maps"
"os"
"slices"
)
@@ -122,11 +123,7 @@ func (cfw *CoverageDataWriter) writeSegmentPreamble(args map[string]string, ws *
}
cfw.csh.StrTabLen = uint32(len(ws.BytesWritten())) - hdrsz
akeys := make([]string, 0, len(args))
for k := range args {
akeys = append(akeys, k)
}
slices.Sort(akeys)
akeys := slices.Sorted(maps.Keys(args))
wrULEB128 := func(v uint) error {
cfw.tmp = cfw.tmp[:0]

View File

@@ -10,10 +10,10 @@ package encodemeta
import (
"bytes"
"crypto/md5"
"encoding/binary"
"fmt"
"hash"
"hash/fnv"
"internal/coverage"
"internal/coverage/stringtab"
"internal/coverage/uleb128"
@@ -39,7 +39,7 @@ func NewCoverageMetaDataBuilder(pkgpath string, pkgname string, modulepath strin
}
x := &CoverageMetaDataBuilder{
tmp: make([]byte, 0, 256),
h: md5.New(),
h: fnv.New128a(),
}
x.stab.InitWriter()
x.stab.Lookup("")
@@ -188,7 +188,7 @@ func (b *CoverageMetaDataBuilder) Emit(w io.WriteSeeker) ([16]byte, error) {
// HashFuncDesc computes an md5 sum of a coverage.FuncDesc and returns
// a digest for it.
func HashFuncDesc(f *coverage.FuncDesc) [16]byte {
h := md5.New()
h := fnv.New128a()
tmp := make([]byte, 0, 32)
hashFuncDesc(h, f, tmp)
var r [16]byte

View File

@@ -6,9 +6,9 @@ package encodemeta
import (
"bufio"
"crypto/md5"
"encoding/binary"
"fmt"
"hash/fnv"
"internal/coverage"
"internal/coverage/stringtab"
"io"
@@ -112,7 +112,9 @@ func (m *CoverageMetaFileWriter) Write(finalHash [16]byte, blobs [][]byte, mode
// Now emit blobs themselves.
for k, blob := range blobs {
if m.debug {
fmt.Fprintf(os.Stderr, "=+= writing blob %d len %d at off=%d hash %s\n", k, len(blob), off2, fmt.Sprintf("%x", md5.Sum(blob)))
h := fnv.New128a()
h.Write(blob)
fmt.Fprintf(os.Stderr, "=+= writing blob %d len %d at off=%d hash %s\n", k, len(blob), off2, fmt.Sprintf("%x", h.Sum(nil)))
}
if _, err = m.w.Write(blob); err != nil {
return fmt.Errorf("error writing %s: %v", m.mfname, err)

View File

@@ -26,9 +26,9 @@ package coverage
// slot: 1 path='internal/goarch' hard-coded id: 2
// slot: 2 path='internal/runtime/atomic' hard-coded id: 3
// slot: 3 path='internal/goos'
// slot: 4 path='runtime/internal/sys' hard-coded id: 5
// slot: 4 path='internal/runtime/sys' hard-coded id: 5
// slot: 5 path='internal/abi' hard-coded id: 4
// slot: 6 path='runtime/internal/math' hard-coded id: 6
// slot: 6 path='internal/runtime/math' hard-coded id: 6
// slot: 7 path='internal/bytealg' hard-coded id: 7
// slot: 8 path='internal/goexperiment'
// slot: 9 path='internal/runtime/syscall' hard-coded id: 8
@@ -50,12 +50,14 @@ var rtPkgs = [...]string{
"internal/runtime/atomic",
"internal/goos",
"internal/chacha8rand",
"runtime/internal/sys",
"internal/runtime/sys",
"internal/abi",
"runtime/internal/math",
"internal/runtime/maps",
"internal/runtime/math",
"internal/bytealg",
"internal/goexperiment",
"internal/runtime/syscall",
"internal/stringslite",
"runtime",
}

View File

@@ -5,8 +5,8 @@
package pods_test
import (
"crypto/md5"
"fmt"
"hash/fnv"
"internal/coverage"
"internal/coverage/pods"
"os"
@@ -35,13 +35,17 @@ func TestPodCollection(t *testing.T) {
}
mkmeta := func(dir string, tag string) string {
hash := md5.Sum([]byte(tag))
h := fnv.New128a()
h.Write([]byte(tag))
hash := h.Sum(nil)
fn := fmt.Sprintf("%s.%x", coverage.MetaFilePref, hash)
return mkfile(dir, fn)
}
mkcounter := func(dir string, tag string, nt int, pid int) string {
hash := md5.Sum([]byte(tag))
h := fnv.New128a()
h.Write([]byte(tag))
hash := h.Sum(nil)
fn := fmt.Sprintf(coverage.CounterFileTempl, coverage.CounterFilePref, hash, pid, nt)
return mkfile(dir, fn)
}
@@ -112,16 +116,16 @@ func TestPodCollection(t *testing.T) {
}
expected := []string{
`o1/covmeta.ae7be26cdaa742ca148068d5ac90eaca [
o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.40.2 o:0
o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.41.2 o:0
o1/covcounters.ae7be26cdaa742ca148068d5ac90eaca.42.1 o:0
o2/covcounters.ae7be26cdaa742ca148068d5ac90eaca.35.11 o:1
`o1/covmeta.0880952782ab1be95aa0733055a4d06b [
o1/covcounters.0880952782ab1be95aa0733055a4d06b.40.2 o:0
o1/covcounters.0880952782ab1be95aa0733055a4d06b.41.2 o:0
o1/covcounters.0880952782ab1be95aa0733055a4d06b.42.1 o:0
o2/covcounters.0880952782ab1be95aa0733055a4d06b.35.11 o:1
]`,
`o2/covmeta.aaf2f89992379705dac844c0a2a1d45f [
o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.36.3 o:1
o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.37.2 o:1
o2/covcounters.aaf2f89992379705dac844c0a2a1d45f.38.1 o:1
`o2/covmeta.0880952783ab1be95aa0733055a4d1a6 [
o2/covcounters.0880952783ab1be95aa0733055a4d1a6.36.3 o:1
o2/covcounters.0880952783ab1be95aa0733055a4d1a6.37.2 o:1
o2/covcounters.0880952783ab1be95aa0733055a4d1a6.38.1 o:1
]`,
}
for k, exp := range expected {

View File

@@ -37,6 +37,7 @@ var X86 struct {
HasBMI1 bool
HasBMI2 bool
HasERMS bool
HasFSRM bool
HasFMA bool
HasOSXSAVE bool
HasPCLMULQDQ bool
@@ -72,10 +73,22 @@ var ARM64 struct {
HasCRC32 bool
HasATOMICS bool
HasCPUID bool
HasDIT bool
IsNeoverse bool
_ CacheLinePad
}
// The booleans in Loong64 contain the correspondingly named cpu feature bit.
// The struct is padded to avoid false sharing.
var Loong64 struct {
_ CacheLinePad
HasLSX bool // support 128-bit vector extension
HasCRC32 bool // support CRC instruction
HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D}
HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction
_ CacheLinePad
}
var MIPS64X struct {
_ CacheLinePad
HasMSA bool // MIPS SIMD architecture
@@ -127,6 +140,7 @@ var S390X struct {
//go:linkname X86
//go:linkname ARM
//go:linkname ARM64
//go:linkname Loong64
//go:linkname MIPS64X
//go:linkname PPC64
//go:linkname S390X

View File

@@ -28,13 +28,15 @@ func doinit() {
func getisar0() uint64
func getpfr0() uint64
func getMIDR() uint64
func extractBits(data uint64, start, end uint) uint {
return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
}
func parseARM64SystemRegisters(isar0 uint64) {
func parseARM64SystemRegisters(isar0, pfr0 uint64) {
// ID_AA64ISAR0_EL1
switch extractBits(isar0, 4, 7) {
case 1:
@@ -66,4 +68,9 @@ func parseARM64SystemRegisters(isar0 uint64) {
case 2:
ARM64.HasATOMICS = true
}
switch extractBits(pfr0, 48, 51) {
case 1:
ARM64.HasDIT = true
}
}

View File

@@ -11,6 +11,13 @@ TEXT ·getisar0(SB),NOSPLIT,$0
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into R0
MRS ID_AA64PFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getMIDR() uint64
TEXT ·getMIDR(SB), NOSPLIT, $0-8
MRS MIDR_EL1, R0

View File

@@ -12,6 +12,7 @@ func osInit() {
ARM64.HasATOMICS = sysctlEnabled([]byte("hw.optional.armv8_1_atomics\x00"))
ARM64.HasCRC32 = sysctlEnabled([]byte("hw.optional.armv8_crc32\x00"))
ARM64.HasSHA512 = sysctlEnabled([]byte("hw.optional.armv8_2_sha512\x00"))
ARM64.HasDIT = sysctlEnabled([]byte("hw.optional.arm.FEAT_DIT\x00"))
// There are no hw.optional sysctl values for the below features on Mac OS 11.0
// to detect their supported state dynamically. Assume the CPU features that

View File

@@ -9,6 +9,7 @@ package cpu
func osInit() {
// Retrieve info from system register ID_AA64ISAR0_EL1.
isar0 := getisar0()
prf0 := getpfr0()
parseARM64SystemRegisters(isar0)
parseARM64SystemRegisters(isar0, prf0)
}

View File

@@ -31,6 +31,7 @@ const (
hwcap_ATOMICS = 1 << 8
hwcap_CPUID = 1 << 11
hwcap_SHA512 = 1 << 21
hwcap_DIT = 1 << 24
)
func hwcapInit(os string) {
@@ -44,6 +45,7 @@ func hwcapInit(os string) {
ARM64.HasCRC32 = isSet(HWCap, hwcap_CRC32)
ARM64.HasCPUID = isSet(HWCap, hwcap_CPUID)
ARM64.HasSHA512 = isSet(HWCap, hwcap_SHA512)
ARM64.HasDIT = isSet(HWCap, hwcap_DIT)
// The Samsung S9+ kernel reports support for atomics, but not all cores
// actually support them, resulting in SIGILL. See issue #28431.

View File

@@ -13,6 +13,7 @@ const (
// From OpenBSD's machine/cpu.h.
_CPU_ID_AA64ISAR0 = 2
_CPU_ID_AA64ISAR1 = 3
_CPU_ID_AA64PFR0 = 8
)
//go:noescape
@@ -24,5 +25,11 @@ func osInit() {
if !ok {
return
}
parseARM64SystemRegisters(isar0)
// Get ID_AA64PFR0 from sysctl.
pfr0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64PFR0})
if !ok {
return
}
parseARM64SystemRegisters(isar0, pfr0)
}

View File

@@ -10,4 +10,45 @@ package cpu
// We choose 64 because Loongson 3A5000 the L1 Dcache is 4-way 256-line 64-byte-per-line.
const CacheLinePadSize = 64
func doinit() {}
// Bit fields for CPUCFG registers, Related reference documents:
// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
const (
// CPUCFG1 bits
cpucfg1_CRC32 = 1 << 25
// CPUCFG2 bits
cpucfg2_LAM_BH = 1 << 27
cpucfg2_LAMCAS = 1 << 28
)
// get_cpucfg is implemented in cpu_loong64.s.
func get_cpucfg(reg uint32) uint32
func doinit() {
options = []option{
{Name: "lsx", Feature: &Loong64.HasLSX},
{Name: "crc32", Feature: &Loong64.HasCRC32},
{Name: "lamcas", Feature: &Loong64.HasLAMCAS},
{Name: "lam_bh", Feature: &Loong64.HasLAM_BH},
}
// The CPUCFG data on Loong64 only reflects the hardware capabilities,
// not the kernel support status, so features such as LSX and LASX that
// require kernel support cannot be obtained from the CPUCFG data.
//
// These features only require hardware capability support and do not
// require kernel specific support, so they can be obtained directly
// through CPUCFG
cfg1 := get_cpucfg(1)
cfg2 := get_cpucfg(2)
Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32)
Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAM_BH)
Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAMCAS)
osInit()
}
func cfgIsSet(cfg uint32, val uint32) bool {
return cfg&val != 0
}

View File

@@ -0,0 +1,12 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func get_cpucfg(reg uint32) uint32
TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0-12
MOVW reg+0(FP), R5
CPUCFG R5, R4
MOVW R4, ret+8(FP)
RET

View File

@@ -0,0 +1,26 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build loong64 && linux
package cpu
// This is initialized by archauxv and should not be changed after it is
// initialized.
var HWCap uint
// HWCAP bits. These are exposed by the Linux kernel.
const (
hwcap_LOONGARCH_LSX = 1 << 4
)
func hwcapInit() {
// TODO: Features that require kernel support like LSX and LASX can
// be detected here once needed in std library or by the compiler.
Loong64.HasLSX = hwcIsSet(HWCap, hwcap_LOONGARCH_LSX)
}
func hwcIsSet(hwc uint, val uint) bool {
return hwc&val != 0
}

View File

@@ -0,0 +1,11 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build loong64 && linux
package cpu
func osInit() {
hwcapInit()
}

View File

@@ -40,7 +40,8 @@ const (
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
// edx bits
cpuid_FSRM = 1 << 4
// edx bits for CPUID 0x80000001
cpuid_RDTSCP = 1 << 27
)
@@ -52,6 +53,7 @@ func doinit() {
{Name: "adx", Feature: &X86.HasADX},
{Name: "aes", Feature: &X86.HasAES},
{Name: "erms", Feature: &X86.HasERMS},
{Name: "fsrm", Feature: &X86.HasFSRM},
{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
{Name: "rdtscp", Feature: &X86.HasRDTSCP},
{Name: "sha", Feature: &X86.HasSHA},
@@ -137,7 +139,7 @@ func doinit() {
return
}
_, ebx7, _, _ := cpuid(7, 0)
_, ebx7, _, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
@@ -151,6 +153,8 @@ func doinit() {
X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
}
X86.HasFSRM = isSet(edx7, cpuid_FSRM)
var maxExtendedInformation uint32
maxExtendedInformation, _, _, _ = cpuid(0x80000000, 0)

View File

@@ -5,7 +5,7 @@
package dag
import (
"reflect"
"slices"
"strings"
"testing"
)
@@ -26,7 +26,7 @@ func TestTopo(t *testing.T) {
//
// "a" is a leaf.
wantNodes := strings.Fields("d c b a")
if !reflect.DeepEqual(wantNodes, got) {
if !slices.Equal(wantNodes, got) {
t.Fatalf("want topo sort %v, got %v", wantNodes, got)
}
}

View File

@@ -5,7 +5,7 @@
package dag
import (
"reflect"
"slices"
"strings"
"testing"
)
@@ -52,7 +52,7 @@ func TestParse(t *testing.T) {
g := mustParse(t, diamond)
wantNodes := strings.Fields("a b c d")
if !reflect.DeepEqual(wantNodes, g.Nodes) {
if !slices.Equal(wantNodes, g.Nodes) {
t.Fatalf("want nodes %v, got %v", wantNodes, g.Nodes)
}

View File

@@ -0,0 +1,355 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package exportdata implements common utilities for finding
// and reading gc-generated object files.
package exportdata
// This file should be kept in sync with src/cmd/compile/internal/gc/obj.go .
import (
"bufio"
"bytes"
"errors"
"fmt"
"go/build"
"internal/saferio"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
// ReadUnified reads the contents of the unified export data from a reader r
// that contains the contents of a GC-created archive file.
//
// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
//
// Supported GC-created archive files have 4 layers of nesting:
// - An archive file containing a package definition file.
// - The package definition file contains headers followed by a data section.
// Headers are lines (≤ 4kb) that do not start with "$$".
// - The data section starts with "$$B\n" followed by export data followed
// by an end of section marker "\n$$\n". (The section start "$$\n" is no
// longer supported.)
// - The export data starts with a format byte ('u') followed by the <data> in
// the given format. (See ReadExportDataHeader for older formats.)
//
// Putting this together, the bytes in a GC-created archive files are expected
// to look like the following.
// See cmd/internal/archive for more details on ar file headers.
//
// | <!arch>\n | ar file signature
// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
// | go object <...>\n | objabi header
// | <optional headers>\n | other headers such as build id
// | $$B\n | binary format marker
// | u<data>\n | unified export <data>
// | $$\n | end-of-section marker
// | [optional padding] | padding byte (0x0A) if size is odd
// | [ar file header] | other ar files
// | [ar file data] |
func ReadUnified(r *bufio.Reader) (data []byte, err error) {
// We historically guaranteed headers at the default buffer size (4096) work.
// This ensures we can use ReadSlice throughout.
const minBufferSize = 4096
r = bufio.NewReaderSize(r, minBufferSize)
size, err := FindPackageDefinition(r)
if err != nil {
return
}
n := size
objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
n -= len(objapi)
for _, h := range headers {
n -= len(h)
}
hdrlen, err := ReadExportDataHeader(r)
if err != nil {
return
}
n -= hdrlen
// size also includes the end of section marker. Remove that many bytes from the end.
const marker = "\n$$\n"
n -= len(marker)
if n < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
return
}
// Read n bytes from buf.
data, err = saferio.ReadData(r, uint64(n))
if err != nil {
return
}
// Check for marker at the end.
var suffix [len(marker)]byte
_, err = io.ReadFull(r, suffix[:])
if err != nil {
return
}
if s := string(suffix[:]); s != marker {
err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
return
}
return
}
// FindPackageDefinition positions the reader r at the beginning of a package
// definition file ("__.PKGDEF") within a GC-created archive by reading
// from it, and returns the size of the package definition file in the archive.
//
// The reader must be positioned at the start of the archive file before calling
// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
//
// See cmd/internal/archive for details on the archive format.
func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
// Uses ReadSlice to limit risk of malformed inputs.
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
// Is the first line an archive file signature?
if string(line) != "!<arch>\n" {
err = fmt.Errorf("not the start of an archive file (%q)", line)
return
}
// package export block should be first
size = readArchiveHeader(r, "__.PKGDEF")
if size <= 0 {
err = fmt.Errorf("not a package file")
return
}
return
}
// ReadObjectHeaders reads object headers from the reader. Object headers are
// lines that do not start with an end-of-section marker "$$". The first header
// is the objabi header. On success, the reader will be positioned at the beginning
// of the end-of-section marker.
//
// It returns an error if any header does not fit in r.Size() bytes.
func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
// line is a temporary buffer for headers.
// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
var line []byte
// objapi header should be the first line
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
objapi = string(line)
// objapi header begins with "go object ".
if !strings.HasPrefix(objapi, "go object ") {
err = fmt.Errorf("not a go object file: %s", objapi)
return
}
// process remaining object header lines
for {
// check for an end of section marker "$$"
line, err = r.Peek(2)
if err != nil {
return
}
if string(line) == "$$" {
return // stop
}
// read next header
line, err = r.ReadSlice('\n')
if err != nil {
return
}
headers = append(headers, string(line))
}
}
// ReadExportDataHeader reads the export data header and format from r.
// It returns the number of bytes read, or an error if the format is no longer
// supported or it failed to read.
//
// The only currently supported format is binary export data in the
// unified export format.
func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
// Read export data header.
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
switch hdr {
case "$$\n":
err = fmt.Errorf("old textual export format no longer supported (recompile package)")
return
case "$$B\n":
var format byte
format, err = r.ReadByte()
if err != nil {
return
}
// The unified export format starts with a 'u'.
switch format {
case 'u':
default:
// Older no longer supported export formats include:
// indexed export format which started with an 'i'; and
// the older binary export format which started with a 'c',
// 'd', or 'v' (from "version").
err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
return
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
n = len(hdr) + 1 // + 1 is for 'u'
return
}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
func FindPkg(path, srcDir string) (filename, id string, err error) {
if path == "" {
return "", "", errors.New("path is empty")
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
var bp *build.Package
bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
if bp.Goroot && bp.Dir != "" {
filename, err = lookupGorootExport(bp.Dir)
if err == nil {
_, err = os.Stat(filename)
}
if err == nil {
return filename, bp.ImportPath, nil
}
}
goto notfound
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
}
id = bp.ImportPath
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
f, statErr := os.Stat(filename)
if statErr == nil && !f.IsDir() {
return filename, id, nil
}
if err == nil {
err = statErr
}
}
notfound:
if err == nil {
return "", path, fmt.Errorf("can't find import: %q", path)
}
return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
}
var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
var exportMap sync.Map // package dir → func() (string, error)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, error) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
err error
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
listOnce.Do(func() {
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
var output []byte
output, err = cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
err = errors.New(string(ee.Stderr))
}
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
return
}
exportPath = exports[0]
})
return exportPath, err
})
}
return f.(func() (string, error))()
}

View File

@@ -0,0 +1,32 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains support functions for exportdata.
package exportdata
import (
"bufio"
"io"
"strconv"
"strings"
)
// Copy of cmd/internal/archive.ReadHeader.
func readArchiveHeader(b *bufio.Reader, name string) int {
// architecture-independent object file output
const HeaderSize = 60
var buf [HeaderSize]byte
if _, err := io.ReadFull(b, buf[:]); err != nil {
return -1
}
aname := strings.Trim(string(buf[0:16]), " ")
if !strings.HasPrefix(aname, name) {
return -1
}
asize := strings.Trim(string(buf[48:58]), " ")
i, _ := strconv.Atoi(asize)
return i
}

View File

@@ -6,6 +6,7 @@ package fuzz
import (
"bytes"
"fmt"
"testing"
)
@@ -33,12 +34,6 @@ func (mr *mockRand) uint32n(n uint32) uint32 {
return uint32(c) % n
}
func (mr *mockRand) exp2() int {
c := mr.values[mr.counter]
mr.counter++
return c
}
func (mr *mockRand) bool() bool {
b := mr.b
mr.b = !mr.b
@@ -184,3 +179,43 @@ func TestByteSliceMutators(t *testing.T) {
})
}
}
func BenchmarkByteSliceMutators(b *testing.B) {
tests := [...]struct {
name string
mutator func(*mutator, []byte) []byte
}{
{"RemoveBytes", byteSliceRemoveBytes},
{"InsertRandomBytes", byteSliceInsertRandomBytes},
{"DuplicateBytes", byteSliceDuplicateBytes},
{"OverwriteBytes", byteSliceOverwriteBytes},
{"BitFlip", byteSliceBitFlip},
{"XORByte", byteSliceXORByte},
{"SwapByte", byteSliceSwapByte},
{"ArithmeticUint8", byteSliceArithmeticUint8},
{"ArithmeticUint16", byteSliceArithmeticUint16},
{"ArithmeticUint32", byteSliceArithmeticUint32},
{"ArithmeticUint64", byteSliceArithmeticUint64},
{"OverwriteInterestingUint8", byteSliceOverwriteInterestingUint8},
{"OverwriteInterestingUint16", byteSliceOverwriteInterestingUint16},
{"OverwriteInterestingUint32", byteSliceOverwriteInterestingUint32},
{"InsertConstantBytes", byteSliceInsertConstantBytes},
{"OverwriteConstantBytes", byteSliceOverwriteConstantBytes},
{"ShuffleBytes", byteSliceShuffleBytes},
{"SwapBytes", byteSliceSwapBytes},
}
for _, tc := range tests {
b.Run(tc.name, func(b *testing.B) {
for size := 64; size <= 1024; size *= 2 {
b.Run(fmt.Sprintf("%d", size), func(b *testing.B) {
m := &mutator{r: newPcgRand()}
input := make([]byte, size)
for i := 0; i < b.N; i++ {
tc.mutator(m, input)
}
})
}
})
}
}

View File

@@ -17,7 +17,6 @@ type mutatorRand interface {
uint32() uint32
intn(int) int
uint32n(uint32) uint32
exp2() int
bool() bool
save(randState, randInc *uint64)
@@ -123,11 +122,6 @@ func (r *pcgRand) uint32n(n uint32) uint32 {
return uint32(prod >> 32)
}
// exp2 generates n with probability 1/2^(n+1).
func (r *pcgRand) exp2() int {
return bits.TrailingZeros32(r.uint32())
}
// bool generates a random bool.
func (r *pcgRand) bool() bool {
return r.uint32()&1 == 0

View File

@@ -682,7 +682,7 @@ func (ws *workerServer) serve(ctx context.Context) error {
}
// chainedMutations is how many mutations are applied before the worker
// resets the input to it's original state.
// resets the input to its original state.
// NOTE: this number was picked without much thought. It is low enough that
// it seems to create a significant diversity in mutated inputs. We may want
// to consider looking into this more closely once we have a proper performance

View File

@@ -17,11 +17,11 @@ import (
var goarches []string
func main() {
data, err := os.ReadFile("../../go/build/syslist.go")
data, err := os.ReadFile("../../internal/syslist/syslist.go")
if err != nil {
log.Fatal(err)
}
const goarchPrefix = `var knownArch = map[string]bool{`
const goarchPrefix = `var KnownArch = map[string]bool{`
inGOARCH := false
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goarchPrefix) {

View File

@@ -11,7 +11,6 @@ import (
"internal/testenv"
"os"
"os/exec"
"reflect"
"runtime/metrics"
"slices"
"strings"
@@ -110,6 +109,9 @@ func TestCmdBisect(t *testing.T) {
var want []string
src, err := os.ReadFile("godebug_test.go")
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(string(src), "\n") {
if strings.Contains(line, "BISECT"+" "+"BUG") {
want = append(want, fmt.Sprintf("godebug_test.go:%d", i+1))
@@ -125,7 +127,7 @@ func TestCmdBisect(t *testing.T) {
}
slices.Sort(have)
if !reflect.DeepEqual(have, want) {
if !slices.Equal(have, want) {
t.Errorf("bad bisect output:\nhave %v\nwant %v\ncomplete output:\n%s", have, want, string(out))
}
}

View File

@@ -26,10 +26,12 @@ type Info struct {
// (Otherwise the test in this package will fail.)
var All = []Info{
{Name: "asynctimerchan", Package: "time", Changed: 23, Old: "1"},
{Name: "dataindependenttiming", Package: "crypto/subtle", Opaque: true},
{Name: "execerrdot", Package: "os/exec"},
{Name: "gocachehash", Package: "cmd/go"},
{Name: "gocachetest", Package: "cmd/go"},
{Name: "gocacheverify", Package: "cmd/go"},
{Name: "gotestjsonbuildtext", Package: "cmd/go", Changed: 24, Old: "1"},
{Name: "gotypesalias", Package: "go/types", Changed: 23, Old: "0"},
{Name: "http2client", Package: "net/http"},
{Name: "http2debug", Package: "net/http", Opaque: true},
@@ -42,25 +44,27 @@ var All = []Info{
//{Name: "multipartfiles", Package: "mime/multipart"},
{Name: "multipartmaxheaders", Package: "mime/multipart"},
{Name: "multipartmaxparts", Package: "mime/multipart"},
{Name: "multipathtcp", Package: "net"},
{Name: "multipathtcp", Package: "net", Changed: 24, Old: "0"},
{Name: "netdns", Package: "net", Opaque: true},
{Name: "netedns0", Package: "net", Changed: 19, Old: "0"},
{Name: "panicnil", Package: "runtime", Changed: 21, Old: "1"},
{Name: "randautoseed", Package: "math/rand"},
{Name: "randseednop", Package: "math/rand", Changed: 24, Old: "0"},
{Name: "rsa1024min", Package: "crypto/rsa", Changed: 24, Old: "0"},
{Name: "tarinsecurepath", Package: "archive/tar"},
{Name: "tls10server", Package: "crypto/tls", Changed: 22, Old: "1"},
{Name: "tls3des", Package: "crypto/tls", Changed: 23, Old: "1"},
{Name: "tlskyber", Package: "crypto/tls", Changed: 23, Old: "0", Opaque: true},
{Name: "tlsmaxrsasize", Package: "crypto/tls"},
{Name: "tlsmlkem", Package: "crypto/tls", Changed: 24, Old: "0", Opaque: true},
{Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"},
{Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"},
{Name: "winreadlinkvolume", Package: "os", Changed: 22, Old: "0"},
{Name: "winsymlink", Package: "os", Changed: 22, Old: "0"},
{Name: "x509keypairleaf", Package: "crypto/tls", Changed: 23, Old: "0"},
{Name: "x509negativeserial", Package: "crypto/x509", Changed: 23, Old: "1"},
{Name: "x509sha1", Package: "crypto/x509"},
{Name: "x509rsacrt", Package: "crypto/x509", Changed: 24, Old: "0"},
{Name: "x509usefallbackroots", Package: "crypto/x509"},
{Name: "x509usepolicies", Package: "crypto/x509"},
{Name: "x509usepolicies", Package: "crypto/x509", Changed: 24, Old: "0"},
{Name: "zipinsecurepath", Package: "archive/zip"},
}

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.spinbitmutex
package goexperiment
const SpinbitMutex = false
const SpinbitMutexInt = 0

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.spinbitmutex
package goexperiment
const SpinbitMutex = true
const SpinbitMutexInt = 1

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.swissmap
package goexperiment
const SwissMap = false
const SwissMapInt = 0

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.swissmap
package goexperiment
const SwissMap = true
const SwissMapInt = 1

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.synchashtriemap
package goexperiment
const SyncHashTrieMap = false
const SyncHashTrieMapInt = 0

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.synchashtriemap
package goexperiment
const SyncHashTrieMap = true
const SyncHashTrieMapInt = 1

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build !goexperiment.synctest
package goexperiment
const Synctest = false
const SynctestInt = 0

View File

@@ -0,0 +1,8 @@
// Code generated by mkconsts.go. DO NOT EDIT.
//go:build goexperiment.synctest
package goexperiment
const Synctest = true
const SynctestInt = 1

View File

@@ -51,7 +51,7 @@ package goexperiment
// tags, experiments use the strings.ToLower of their field name.
//
// For the baseline experimental configuration, see
// objabi.experimentBaseline.
// [internal/buildcfg.ParseGOEXPERIMENT].
//
// If you change this struct definition, run "go generate".
type Flags struct {
@@ -113,6 +113,19 @@ type Flags struct {
// AliasTypeParams enables type parameters for alias types.
// Requires that gotypesalias=1 is set with GODEBUG.
// This flag will be removed with Go 1.24.
// This flag will be removed with Go 1.25.
AliasTypeParams bool
// SwissMap enables the SwissTable-based map implementation.
SwissMap bool
// SpinbitMutex enables the new "spinbit" mutex implementation on supported
// platforms. See https://go.dev/issue/68578.
SpinbitMutex bool
// SyncHashTrieMap enables the HashTrieMap sync.Map implementation.
SyncHashTrieMap bool
// Synctest enables the testing/synctest package.
Synctest bool
}

View File

@@ -17,11 +17,11 @@ import (
var gooses []string
func main() {
data, err := os.ReadFile("../../go/build/syslist.go")
data, err := os.ReadFile("../../internal/syslist/syslist.go")
if err != nil {
log.Fatal(err)
}
const goosPrefix = `var knownOS = map[string]bool{`
const goosPrefix = `var KnownOS = map[string]bool{`
inGOOS := false
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goosPrefix) {

View File

@@ -8,5 +8,5 @@ package goversion
// in development and will eventually get released.
//
// It should be updated at the start of each development cycle to be
// the version of the next Go 1.x release. See golang.org/issue/40705.
const Version = 23
// the version of the next Go 1.x release. See go.dev/issue/40705.
const Version = 24

View File

@@ -21,7 +21,7 @@ import (
// export data.
type PkgDecoder struct {
// version is the file format version.
version uint32
version Version
// sync indicates whether the file uses sync markers.
sync bool
@@ -68,8 +68,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
//
// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
@@ -80,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
var ver uint32
assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
pr.version = Version(ver)
switch pr.version {
default:
panic(fmt.Errorf("unsupported version: %v", pr.version))
case 0:
// no flags
case 1:
if pr.version >= numVersions {
panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
}
if pr.version.Has(Flags) {
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
@@ -102,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
assert(err == nil)
pr.elemData = input[pos:]
assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
const fingerprintSize = 8
assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
@@ -136,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
@@ -242,7 +243,7 @@ type Decoder struct {
func (r *Decoder) checkErr(err error) {
if err != nil {
errorf("unexpected decoding error: %w", err)
panicf("unexpected decoding error: %w", err)
}
}
@@ -371,7 +372,7 @@ func (r *Decoder) Int64() int64 {
return r.rawVarint()
}
// Int64 decodes and returns a uint64 value from the element bitstream.
// Uint64 decodes and returns a uint64 value from the element bitstream.
func (r *Decoder) Uint64() uint64 {
r.Sync(SyncUint64)
return r.rawUvarint()
@@ -513,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
return path, name, tag
}
// Version reports the version of the bitstream.
func (w *Decoder) Version() Version { return w.common.version }

View File

@@ -6,7 +6,7 @@ package pkgbits
import (
"bytes"
"crypto/md5"
"crypto/sha256"
"encoding/binary"
"go/constant"
"io"
@@ -15,20 +15,12 @@ import (
"strings"
)
// currentVersion is the current version number.
//
// - v0: initial prototype
//
// - v1: adds the flags uint32 word
//
// TODO(mdempsky): For the next version bump:
// - remove the legacy "has init" bool from the public root
// - remove obj's "derived func instance" bool
const currentVersion uint32 = 1
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
// version of the bitstream.
version Version
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
@@ -52,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
func NewPkgEncoder(syncFrames int) PkgEncoder {
func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
return PkgEncoder{
version: version,
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
@@ -62,20 +55,22 @@ func NewPkgEncoder(syncFrames int) PkgEncoder {
// DumpTo writes the package's encoded data to out0 and returns the
// package fingerprint.
func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
h := md5.New()
h := sha256.New()
out := io.MultiWriter(out0, h)
writeUint32 := func(x uint32) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
writeUint32(currentVersion)
writeUint32(uint32(pw.version))
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
if pw.version.Has(Flags) {
var flags uint32
if pw.SyncMarkers() {
flags |= flagSyncMarkers
}
writeUint32(flags)
}
writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@@ -194,7 +189,7 @@ func (w *Encoder) Flush() Index {
func (w *Encoder) checkErr(err error) {
if err != nil {
errorf("unexpected encoding error: %v", err)
panicf("unexpected encoding error: %v", err)
}
}
@@ -298,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
// Int encodes and writes an int value into the element bitstream.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
// Len encodes and writes a uint value into the element bitstream.
// Uint encodes and writes a uint value into the element bitstream.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
// Reloc encodes and writes a relocation for the given (section,
@@ -359,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
errorf("unhandled %v (%v)", val, val.Kind())
panicf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
@@ -392,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}
// Version reports the version of the bitstream.
func (w *Encoder) Version() Version { return w.p.version }

View File

@@ -0,0 +1,76 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits_test
import (
"internal/pkgbits"
"strings"
"testing"
)
func TestRoundTrip(t *testing.T) {
for _, version := range []pkgbits.Version{
pkgbits.V0,
pkgbits.V1,
pkgbits.V2,
} {
pw := pkgbits.NewPkgEncoder(version, -1)
w := pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
w.Flush()
var b strings.Builder
_ = pw.DumpTo(&b)
input := b.String()
pr := pkgbits.NewPkgDecoder("package_id", input)
r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
if r.Version() != w.Version() {
t.Errorf("Expected reader version %q to be the writer version %q", r.Version(), w.Version())
}
}
}
// Type checker to enforce that know V* have the constant values they must have.
var _ [0]bool = [pkgbits.V0]bool{}
var _ [1]bool = [pkgbits.V1]bool{}
func TestVersions(t *testing.T) {
type vfpair struct {
v pkgbits.Version
f pkgbits.Field
}
// has field tests
for _, c := range []vfpair{
{pkgbits.V1, pkgbits.Flags},
{pkgbits.V2, pkgbits.Flags},
{pkgbits.V0, pkgbits.HasInit},
{pkgbits.V1, pkgbits.HasInit},
{pkgbits.V0, pkgbits.DerivedFuncInstance},
{pkgbits.V1, pkgbits.DerivedFuncInstance},
{pkgbits.V0, pkgbits.DerivedInfoNeeded},
{pkgbits.V1, pkgbits.DerivedInfoNeeded},
{pkgbits.V2, pkgbits.AliasTypeParamNames},
} {
if !c.v.Has(c.f) {
t.Errorf("Expected version %v to have field %v", c.v, c.f)
}
}
// does not have field tests
for _, c := range []vfpair{
{pkgbits.V0, pkgbits.Flags},
{pkgbits.V2, pkgbits.HasInit},
{pkgbits.V2, pkgbits.DerivedFuncInstance},
{pkgbits.V2, pkgbits.DerivedInfoNeeded},
{pkgbits.V0, pkgbits.AliasTypeParamNames},
{pkgbits.V1, pkgbits.AliasTypeParamNames},
} {
if c.v.Has(c.f) {
t.Errorf("Expected version %v to not have field %v", c.v, c.f)
}
}
}

View File

@@ -12,6 +12,6 @@ func assert(b bool) {
}
}
func errorf(format string, args ...any) {
func panicf(format string, args ...any) {
panic(fmt.Errorf(format, args...))
}

View File

@@ -0,0 +1,85 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
// Version indicates a version of a unified IR bitstream.
// Each Version indicates the addition, removal, or change of
// new data in the bitstream.
//
// These are serialized to disk and the interpretation remains fixed.
type Version uint32

const (
	// V0: initial prototype.
	//
	// All data that is not assigned a Field is in version V0
	// and has not been deprecated.
	V0 Version = iota

	// V1: adds the Flags uint32 word
	V1

	// V2: removes unused legacy fields and supports type parameters for aliases.
	// - remove the legacy "has init" bool from the public root
	// - remove obj's "derived func instance" bool
	// - add a TypeParamNames field to ObjAlias
	// - remove derived info "needed" bool
	V2

	// numVersions tracks how many versions exist (one past the last V*).
	numVersions = iota
)

// Field denotes a unit of data in the serialized unified IR bitstream.
// It is conceptually like a field in a structure.
//
// We only really need Fields when the data may or may not be present
// in a stream based on the Version of the bitstream.
//
// Unlike much of pkgbits, Fields are not serialized and
// can change values as needed.
type Field int

const (
	// Flags in a uint32 in the header of a bitstream
	// that is used to indicate whether optional features are enabled.
	Flags Field = iota

	// Deprecated: HasInit was a bool indicating whether a package
	// has any init functions.
	HasInit

	// Deprecated: DerivedFuncInstance was a bool indicating
	// whether an object was a function instance.
	DerivedFuncInstance

	// ObjAlias has a list of TypeParamNames.
	AliasTypeParamNames

	// Deprecated: DerivedInfoNeeded was a bool indicating
	// whether a type was a derived type.
	DerivedInfoNeeded

	// numFields tracks how many fields exist (one past the last Field).
	numFields = iota
)

// introduced is the version a field was added.
// Fields not listed default to V0 (present from the start).
var introduced = [numFields]Version{
	Flags:               V1,
	AliasTypeParamNames: V2,
}

// removed is the version a field was removed in or 0 for fields
// that have not yet been deprecated.
// (So removed[f]-1 is the last version it is included in.)
var removed = [numFields]Version{
	HasInit:             V2,
	DerivedFuncInstance: V2,
	DerivedInfoNeeded:   V2,
}

// Has reports whether field f is present in a bitstream at version v.
// A removed[f] of V0 (the zero value) means f has never been removed,
// so only the introduction bound applies.
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

View File

@@ -173,7 +173,8 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool {
"android/amd64", "android/arm", "android/arm64", "android/386",
"freebsd/amd64",
"darwin/amd64", "darwin/arm64",
"windows/amd64", "windows/386", "windows/arm64":
"windows/amd64", "windows/386", "windows/arm64",
"wasip1/wasm":
return true
}
return false
@@ -280,7 +281,7 @@ func FirstClass(goos, goarch string) bool {
return distInfo[OSArch{goos, goarch}].FirstClass
}
// Broken reportsr whether goos/goarch is considered a broken port.
// Broken reports whether goos/goarch is considered a broken port.
// (See https://go.dev/wiki/PortingPolicy#broken-ports.)
func Broken(goos, goarch string) bool {
return distInfo[OSArch{goos, goarch}].Broken

View File

@@ -111,6 +111,6 @@ var distInfo = map[OSArch]osArchInfo{
{"wasip1", "wasm"}: {},
{"windows", "386"}: {CgoSupported: true, FirstClass: true},
{"windows", "amd64"}: {CgoSupported: true, FirstClass: true},
{"windows", "arm"}: {},
{"windows", "arm"}: {Broken: true},
{"windows", "arm64"}: {CgoSupported: true},
}

View File

@@ -0,0 +1,53 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package poll
import (
"internal/syscall/unix"
"syscall"
)
// supportCopyFileRange reports whether the running kernel provides a
// usable copy_file_range(2); the actual probe is implemented in
// internal/syscall/unix.
func supportCopyFileRange() bool {
	return unix.SupportCopyFileRange()
}
// For best performance, call copy_file_range() with the largest len value
// possible. It is interruptible on most file systems, so there is no penalty
// for using very large len values, even SSIZE_MAX.
// 1<<31 - 1 keeps each round's length within a signed 32-bit int.
const maxCopyFileRangeRound = 1<<31 - 1
func handleCopyFileRangeErr(err error, copied, written int64) (bool, error) {
switch err {
case syscall.ENOSYS:
// The copy_file_range(2) function first appeared in FreeBSD 13.0.
// Go supports FreeBSD >= 12, so the system call
// may not be present. We've detected the FreeBSD version with
// unix.SupportCopyFileRange() at the beginning of this function,
// but we still want to check for ENOSYS here to prevent some rare
// case like https://go.dev/issue/58592
//
// If we see ENOSYS, we have certainly not transferred
// any data, so we can tell the caller that we
// couldn't handle the transfer and let them fall
// back to more generic code.
return false, nil
case syscall.EFBIG, syscall.EINVAL, syscall.EIO:
// For EFBIG, the copy has exceeds the process's file size limit
// or the maximum file size for the filesystem dst resides on, in
// this case, we leave it to generic copy.
//
// For EINVAL, there could be a few reasons:
// 1. Either dst or src refers to a file object that
// is not a regular file, for instance, a pipe.
// 2. src and dst refer to the same file and byte ranges
// overlap.
// 3. The flags argument is not 0.
// Neither of these cases should be considered handled by
// copy_file_range(2) because there is no data transfer, so
// just fall back to generic copy.
return false, nil
}
return true, err
}

View File

@@ -10,6 +10,10 @@ import (
"syscall"
)
func supportCopyFileRange() bool {
return isKernelVersionGE53()
}
var isKernelVersionGE53 = sync.OnceValue(func() bool {
major, minor := unix.KernelVersion()
// copy_file_range(2) is broken in various ways on kernels older than 5.3,
@@ -18,104 +22,62 @@ var isKernelVersionGE53 = sync.OnceValue(func() bool {
return major > 5 || (major == 5 && minor >= 3)
})
const maxCopyFileRangeRound = 1 << 30
// For best performance, call copy_file_range() with the largest len value
// possible. Linux sets up a limitation of data transfer for most of its I/O
// system calls, as MAX_RW_COUNT (INT_MAX & PAGE_MASK). This value equals to
// the maximum integer value minus a page size that is typically 2^12=4096 bytes.
// That is to say, it's the maximum integer value with the lowest 12 bits unset,
// which is 0x7ffff000.
const maxCopyFileRangeRound = 0x7ffff000
// CopyFileRange copies at most remain bytes of data from src to dst, using
// the copy_file_range system call. dst and src must refer to regular files.
func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err error) {
if !isKernelVersionGE53() {
return 0, false, nil
}
for remain > 0 {
max := remain
if max > maxCopyFileRangeRound {
max = maxCopyFileRangeRound
}
n, err := copyFileRange(dst, src, int(max))
switch err {
case syscall.ENOSYS:
// copy_file_range(2) was introduced in Linux 4.5.
// Go supports Linux >= 2.6.33, so the system call
// may not be present.
//
// If we see ENOSYS, we have certainly not transferred
// any data, so we can tell the caller that we
// couldn't handle the transfer and let them fall
// back to more generic code.
return 0, false, nil
case syscall.EXDEV, syscall.EINVAL, syscall.EIO, syscall.EOPNOTSUPP, syscall.EPERM:
// Prior to Linux 5.3, it was not possible to
// copy_file_range across file systems. Similarly to
// the ENOSYS case above, if we see EXDEV, we have
// not transferred any data, and we can let the caller
// fall back to generic code.
//
// As for EINVAL, that is what we see if, for example,
// dst or src refer to a pipe rather than a regular
// file. This is another case where no data has been
// transferred, so we consider it unhandled.
//
// If src and dst are on CIFS, we can see EIO.
// See issue #42334.
//
// If the file is on NFS, we can see EOPNOTSUPP.
// See issue #40731.
//
// If the process is running inside a Docker container,
// we might see EPERM instead of ENOSYS. See issue
// #40893. Since EPERM might also be a legitimate error,
// don't mark copy_file_range(2) as unsupported.
return 0, false, nil
case nil:
if n == 0 {
// If we did not read any bytes at all,
// then this file may be in a file system
// where copy_file_range silently fails.
// https://lore.kernel.org/linux-fsdevel/20210126233840.GG4626@dread.disaster.area/T/#m05753578c7f7882f6e9ffe01f981bc223edef2b0
if written == 0 {
return 0, false, nil
}
// Otherwise src is at EOF, which means
// we are done.
return written, true, nil
func handleCopyFileRangeErr(err error, copied, written int64) (bool, error) {
switch err {
case syscall.ENOSYS:
// copy_file_range(2) was introduced in Linux 4.5.
// Go supports Linux >= 3.2, so the system call
// may not be present.
//
// If we see ENOSYS, we have certainly not transferred
// any data, so we can tell the caller that we
// couldn't handle the transfer and let them fall
// back to more generic code.
return false, nil
case syscall.EXDEV, syscall.EINVAL, syscall.EIO, syscall.EOPNOTSUPP, syscall.EPERM:
// Prior to Linux 5.3, it was not possible to
// copy_file_range across file systems. Similarly to
// the ENOSYS case above, if we see EXDEV, we have
// not transferred any data, and we can let the caller
// fall back to generic code.
//
// As for EINVAL, that is what we see if, for example,
// dst or src refer to a pipe rather than a regular
// file. This is another case where no data has been
// transferred, so we consider it unhandled.
//
// If src and dst are on CIFS, we can see EIO.
// See issue #42334.
//
// If the file is on NFS, we can see EOPNOTSUPP.
// See issue #40731.
//
// If the process is running inside a Docker container,
// we might see EPERM instead of ENOSYS. See issue
// #40893. Since EPERM might also be a legitimate error,
// don't mark copy_file_range(2) as unsupported.
return false, nil
case nil:
if copied == 0 {
// If we did not read any bytes at all,
// then this file may be in a file system
// where copy_file_range silently fails.
// https://lore.kernel.org/linux-fsdevel/20210126233840.GG4626@dread.disaster.area/T/#m05753578c7f7882f6e9ffe01f981bc223edef2b0
if written == 0 {
return false, nil
}
remain -= n
written += n
default:
return written, true, err
}
}
return written, true, nil
}
// copyFileRange performs one round of copy_file_range(2).
func copyFileRange(dst, src *FD, max int) (written int64, err error) {
// The signature of copy_file_range(2) is:
//
// ssize_t copy_file_range(int fd_in, loff_t *off_in,
// int fd_out, loff_t *off_out,
// size_t len, unsigned int flags);
//
// Note that in the call to unix.CopyFileRange below, we use nil
// values for off_in and off_out. For the system call, this means
// "use and update the file offsets". That is why we must acquire
// locks for both file descriptors (and why this whole machinery is
// in the internal/poll package to begin with).
if err := dst.writeLock(); err != nil {
return 0, err
}
defer dst.writeUnlock()
if err := src.readLock(); err != nil {
return 0, err
}
defer src.readUnlock()
var n int
for {
n, err = unix.CopyFileRange(src.Sysfd, nil, dst.Sysfd, nil, max, 0)
if err != syscall.EINTR {
break
// Otherwise src is at EOF, which means
// we are done.
}
}
return int64(n), err
return true, err
}

View File

@@ -0,0 +1,70 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build freebsd || linux
package poll
import "internal/syscall/unix"
// CopyFileRange copies at most remain bytes of data from src to dst,
// using the copy_file_range system call. dst and src must refer to
// regular files.
//
// written is the number of bytes copied; handled reports whether the
// system call took care of the transfer (when false, nothing was moved
// and the caller should fall back to a generic copy).
func CopyFileRange(dst, src *FD, remain int64) (written int64, handled bool, err error) {
	if !supportCopyFileRange() {
		// Kernel support is absent; let the caller fall back.
		return 0, false, nil
	}
	for remain > 0 {
		// Cap each round at the platform's preferred maximum length.
		chunk := remain
		if chunk > maxCopyFileRangeRound {
			chunk = maxCopyFileRangeRound
		}
		n, e := copyFileRange(dst, src, int(chunk))
		if n > 0 {
			remain -= n
			written += n
		}
		// Classify this round's outcome: stop on a real error, on an
		// unhandled transfer, or when the source is exhausted (n == 0).
		handled, err = handleCopyFileRangeErr(e, n, written)
		if n == 0 || !handled || err != nil {
			return written, handled, err
		}
	}
	return written, true, nil
}
// copyFileRange performs one round of copy_file_range(2), transferring
// at most max bytes and retrying the system call while it is
// interrupted by a signal (EINTR).
func copyFileRange(dst, src *FD, max int) (written int64, err error) {
	// For Linux, the signature of copy_file_range(2) is:
	//
	//	ssize_t copy_file_range(int fd_in, loff_t *off_in,
	//	                        int fd_out, loff_t *off_out,
	//	                        size_t len, unsigned int flags);
	//
	// For FreeBSD, the signature of copy_file_range(2) is:
	//
	//	ssize_t
	//	copy_file_range(int infd, off_t *inoffp, int outfd, off_t *outoffp,
	//	                size_t len, unsigned int flags);
	//
	// Note that in the call to unix.CopyFileRange below, we use nil
	// values for off_in/off_out and inoffp/outoffp, which means "the file
	// offset for infd(fd_in) or outfd(fd_out) respectively will be used and
	// updated by the number of bytes copied".
	//
	// That is why we must acquire locks for both file descriptors (and why
	// this whole machinery is in the internal/poll package to begin with).
	if err := dst.writeLock(); err != nil {
		return 0, err
	}
	defer dst.writeUnlock()
	if err := src.readLock(); err != nil {
		return 0, err
	}
	defer src.readUnlock()
	// Retry until the syscall completes without being interrupted.
	return ignoringEINTR2(func() (int64, error) {
		n, err := unix.CopyFileRange(src.Sysfd, nil, dst.Sysfd, nil, max, 0)
		return int64(n), err
	})
}

View File

@@ -77,3 +77,13 @@ func ignoringEINTR(fn func() error) error {
}
}
}
// ignoringEINTR2 is ignoringEINTR, but returning an additional value.
func ignoringEINTR2[T any](fn func() (T, error)) (T, error) {
for {
v, err := fn()
if err != syscall.EINTR {
return v, err
}
}
}

View File

@@ -225,11 +225,11 @@ func readIntLE(b []byte, size uintptr) uint64 {
case 1:
return uint64(b[0])
case 2:
return uint64(byteorder.LeUint16(b))
return uint64(byteorder.LEUint16(b))
case 4:
return uint64(byteorder.LeUint32(b))
return uint64(byteorder.LEUint32(b))
case 8:
return uint64(byteorder.LeUint64(b))
return uint64(byteorder.LEUint64(b))
default:
panic("internal/poll: readInt with unsupported size")
}

View File

@@ -1,77 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd
package poll
import "syscall"
// maxSendfileSize is the largest chunk size we ask the kernel to copy
// at a time.
const maxSendfileSize int = 4 << 20
// SendFile wraps the sendfile system call.
func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, handled bool) {
defer func() {
TestHookDidSendFile(dstFD, src, written, err, handled)
}()
if err := dstFD.writeLock(); err != nil {
return 0, err, false
}
defer dstFD.writeUnlock()
if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
return 0, err, false
}
dst := dstFD.Sysfd
for remain > 0 {
n := maxSendfileSize
if int64(n) > remain {
n = int(remain)
}
m := n
pos1 := pos
n, err = syscall.Sendfile(dst, src, &pos1, n)
if n > 0 {
pos += int64(n)
written += int64(n)
remain -= int64(n)
// (n, nil) indicates that sendfile(2) has transferred
// the exact number of bytes we requested, or some unretryable
// error have occurred with partial bytes sent. Either way, we
// don't need to go through the following logic to check EINTR
// or fell into dstFD.pd.waitWrite, just continue to send the
// next chunk or break the loop.
if n == m {
continue
} else if err != syscall.EAGAIN &&
err != syscall.EINTR &&
err != syscall.EBUSY {
// Particularly, EPIPE. Errors like that would normally lead
// the subsequent sendfile(2) call to (-1, EBADF).
break
}
} else if err != syscall.EAGAIN && err != syscall.EINTR {
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile), and other errors.
// We should end the loop when there is no error
// returned from sendfile(2) or it is not a retryable error.
break
}
if err == syscall.EINTR {
continue
}
if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL && err != syscall.EOPNOTSUPP && err != syscall.ENOTSUP)
return
}

View File

@@ -1,58 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package poll
import "syscall"
// maxSendfileSize is the largest chunk size we ask the kernel to copy
// at a time.
const maxSendfileSize int = 4 << 20
// SendFile wraps the sendfile system call.
func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handled bool) {
defer func() {
TestHookDidSendFile(dstFD, src, written, err, handled)
}()
if err := dstFD.writeLock(); err != nil {
return 0, err, false
}
defer dstFD.writeUnlock()
if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
return 0, err, false
}
dst := dstFD.Sysfd
for remain > 0 {
n := maxSendfileSize
if int64(n) > remain {
n = int(remain)
}
n, err = syscall.Sendfile(dst, src, nil, n)
if n > 0 {
written += int64(n)
remain -= int64(n)
continue
} else if err != syscall.EAGAIN && err != syscall.EINTR {
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile), and other errors.
// We should end the loop when there is no error
// returned from sendfile(2) or it is not a retryable error.
break
}
if err == syscall.EINTR {
continue
}
if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
return
}

View File

@@ -4,66 +4,9 @@
package poll
import "syscall"
//go:cgo_ldflag "-lsendfile"
// Not strictly needed, but very helpful for debugging, see issue #10221.
//
//go:cgo_import_dynamic _ _ "libsendfile.so"
//go:cgo_import_dynamic _ _ "libsocket.so"
// maxSendfileSize is the largest chunk size we ask the kernel to copy
// at a time.
const maxSendfileSize int = 4 << 20
// SendFile wraps the sendfile system call.
func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, handled bool) {
defer func() {
TestHookDidSendFile(dstFD, src, written, err, handled)
}()
if err := dstFD.writeLock(); err != nil {
return 0, err, false
}
defer dstFD.writeUnlock()
if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
return 0, err, false
}
dst := dstFD.Sysfd
for remain > 0 {
n := maxSendfileSize
if int64(n) > remain {
n = int(remain)
}
pos1 := pos
n, err = syscall.Sendfile(dst, src, &pos1, n)
if err == syscall.EAGAIN || err == syscall.EINTR {
// partial write may have occurred
n = int(pos1 - pos)
}
if n > 0 {
pos += int64(n)
written += int64(n)
remain -= int64(n)
continue
} else if err != syscall.EAGAIN && err != syscall.EINTR {
// This includes syscall.ENOSYS (no kernel
// support) and syscall.EINVAL (fd types which
// don't implement sendfile), and other errors.
// We should end the loop when there is no error
// returned from sendfile(2) or it is not a retryable error.
break
}
if err == syscall.EINTR {
continue
}
if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
break
}
}
if err == syscall.EAGAIN {
err = nil
}
handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL)
return
}

View File

@@ -0,0 +1,170 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || solaris
package poll
import (
"io"
"runtime"
"syscall"
)
// SendFile wraps the sendfile system call.
//
// It copies data from src (a file descriptor) to dstFD,
// starting at the current position of src.
// It updates the current position of src to after the
// copied data.
//
// If size is zero, it copies the rest of src.
// Otherwise, it copies up to size bytes.
//
// The handled return parameter indicates whether SendFile
// was able to handle some or all of the operation.
// If handled is false, sendfile was unable to perform the copy,
// has not modified the source or destination,
// and the caller should perform the copy using a fallback implementation.
func SendFile(dstFD *FD, src int, size int64) (n int64, err error, handled bool) {
	if goos := runtime.GOOS; goos == "linux" || goos == "android" {
		// Linux's sendfile doesn't require any setup:
		// It sends from the current position of the source file and
		// updates the position of the source after sending.
		return sendFile(dstFD, src, nil, size)
	}

	// Non-Linux sendfile implementations don't use the current position of the source file,
	// so we need to look up the position, pass it explicitly, and adjust it after
	// sendfile returns.
	start, err := ignoringEINTR2(func() (int64, error) {
		return syscall.Seek(src, 0, io.SeekCurrent)
	})
	if err != nil {
		return 0, err, false
	}

	pos := start
	n, err, handled = sendFile(dstFD, src, &pos, size)
	if n > 0 {
		// Advance src's file offset past what was sent, since the
		// explicit-offset sendFile call above did not move it.
		// The Seek error is deliberately discarded: the data itself was
		// already transferred and n reports that accurately.
		ignoringEINTR2(func() (int64, error) {
			return syscall.Seek(src, start+n, io.SeekStart)
		})
	}
	return n, err, handled
}
// sendFile wraps the sendfile system call.
//
// offset, when non-nil, is the explicit source offset passed through to
// sendFileChunk; a nil offset means the platform uses (and advances)
// src's own file offset. size of 0 means copy until the source is
// exhausted; otherwise copy up to size bytes. written is the total
// number of bytes transferred, and handled reports whether sendfile
// took care of (some of) the operation.
func sendFile(dstFD *FD, src int, offset *int64, size int64) (written int64, err error, handled bool) {
	defer func() {
		// Test hook observes every completed call, including error paths.
		TestHookDidSendFile(dstFD, src, written, err, handled)
	}()
	if err := dstFD.writeLock(); err != nil {
		return 0, err, false
	}
	defer dstFD.writeUnlock()
	if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil {
		return 0, err, false
	}

	dst := dstFD.Sysfd
	for {
		// Some platforms support passing 0 to read to the end of the source,
		// but all platforms support just writing a large value.
		//
		// Limit the maximum size to fit in an int32, to avoid any possible overflow.
		chunk := 1<<31 - 1
		if size > 0 {
			chunk = int(min(size-written, int64(chunk)))
		}
		var n int
		n, err = sendFileChunk(dst, src, offset, chunk, written)
		if n > 0 {
			written += int64(n)
		}
		switch err {
		case nil:
			// We're done if sendfile copied no bytes
			// (we're at the end of the source)
			// or if we have a size limit and have reached it.
			//
			// If sendfile copied some bytes and we don't have a size limit,
			// try again to see if there is more data to copy.
			if n == 0 || (size > 0 && written >= size) {
				return written, nil, true
			}
		case syscall.EAGAIN:
			// *BSD and Darwin can return EAGAIN with n > 0,
			// so check to see if the write has completed.
			// So far as we know all other platforms only
			// return EAGAIN when n == 0, but checking is harmless.
			if size > 0 && written >= size {
				return written, nil, true
			}
			// Block until dst is writable again, then retry.
			if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil {
				return written, err, true
			}
		case syscall.EINTR:
			// Retry.
		case syscall.ENOSYS, syscall.EOPNOTSUPP, syscall.EINVAL:
			// ENOSYS indicates no kernel support for sendfile.
			// EINVAL indicates a FD type that does not support sendfile.
			//
			// On Linux, copy_file_range can return EOPNOTSUPP when copying
			// to a NFS file (issue #40731); check for it here just in case.
			return written, err, written > 0
		default:
			// We want to handle ENOTSUP like EOPNOTSUPP.
			// It's a pain to put it as a switch case
			// because on Linux systems ENOTSUP == EOPNOTSUPP,
			// so the compiler complains about a duplicate case.
			if err == syscall.ENOTSUP {
				return written, err, written > 0
			}

			// Not a retryable error.
			return written, err, true
		}
	}
}
// sendFileChunk performs a single sendfile(2) call of at most size
// bytes from src to dst, papering over per-GOOS differences in how the
// offset parameter and return value behave. written is the running
// total from previous rounds (consulted only by the Solaris/illumos
// EINVAL workaround below).
func sendFileChunk(dst, src int, offset *int64, size int, written int64) (n int, err error) {
	switch runtime.GOOS {
	case "linux", "android":
		// The offset is always nil on Linux.
		n, err = syscall.Sendfile(dst, src, offset, size)
	case "solaris", "illumos":
		// Trust the offset, not the return value from sendfile.
		start := *offset
		n, err = syscall.Sendfile(dst, src, offset, size)
		n = int(*offset - start)
		// A quirk on Solaris/illumos: sendfile claims to support out_fd
		// as a regular file but returns EINVAL when the out_fd
		// is not a socket of SOCK_STREAM, while it actually sends
		// out data anyway and updates the file offset.
		//
		// Another quirk: sendfile transfers data and returns EINVAL when being
		// asked to transfer bytes more than the actual file size. For instance,
		// the source file is wrapped in an io.LimitedReader with larger size
		// than the actual file size.
		//
		// To handle these cases we ignore EINVAL if any call to sendfile was
		// able to send data.
		if err == syscall.EINVAL && (n > 0 || written > 0) {
			err = nil
		}
	default:
		start := *offset
		n, err = syscall.Sendfile(dst, src, offset, size)
		if n > 0 {
			// The BSD implementations of syscall.Sendfile don't
			// update the offset parameter (despite it being a *int64).
			//
			// Trust the return value from sendfile, not the offset.
			*offset = start + int64(n)
		}
	}
	return
}

View File

@@ -5,7 +5,7 @@
// This file implements accept for platforms that provide a fast path for
// setting SetNonblock and CloseOnExec.
//go:build dragonfly || freebsd || (linux && !arm) || netbsd || openbsd
//go:build dragonfly || freebsd || linux || netbsd || openbsd
package poll

View File

@@ -1,51 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements accept for platforms that provide a fast path for
// setting SetNonblock and CloseOnExec, but don't necessarily have accept4.
// This is the code we used for accept in Go 1.17 and earlier.
// On Linux the accept4 system call was introduced in 2.6.28 kernel,
// and our minimum requirement is 2.6.32, so we simplified the function.
// Unfortunately, on ARM accept4 wasn't added until 2.6.36, so for ARM
// only we continue using the older code.
//go:build linux && arm
package poll
import "syscall"
// Wrapper around the accept system call that marks the returned file
// descriptor as nonblocking and close-on-exec.
func accept(s int) (int, syscall.Sockaddr, string, error) {
ns, sa, err := Accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
switch err {
case nil:
return ns, sa, "", nil
default: // errors other than the ones listed
return -1, sa, "accept4", err
case syscall.ENOSYS: // syscall missing
case syscall.EINVAL: // some Linux use this instead of ENOSYS
case syscall.EACCES: // some Linux use this instead of ENOSYS
case syscall.EFAULT: // some Linux use this instead of ENOSYS
}
// See ../syscall/exec_unix.go for description of ForkLock.
// It is probably okay to hold the lock across syscall.Accept
// because we have put fd.sysfd into non-blocking mode.
// However, a call to the File method will put it back into
// blocking mode. We can't take that risk, so no use of ForkLock here.
ns, sa, err = AcceptFunc(s)
if err == nil {
syscall.CloseOnExec(ns)
}
if err != nil {
return -1, nil, "accept", err
}
if err = syscall.SetNonblock(ns, true); err != nil {
CloseFunc(ns)
return -1, nil, "setnonblock", err
}
return ns, sa, "", nil
}

View File

@@ -5,7 +5,7 @@
package profile
import (
"reflect"
"slices"
"testing"
)
@@ -34,7 +34,7 @@ func TestPackedEncoding(t *testing.T) {
},
} {
source := &packedInts{tc.uint64s, tc.int64s}
if got, want := marshal(source), tc.encoded; !reflect.DeepEqual(got, want) {
if got, want := marshal(source), tc.encoded; !slices.Equal(got, want) {
t.Errorf("failed encode %d, got %v, want %v", i, got, want)
}
@@ -43,10 +43,10 @@ func TestPackedEncoding(t *testing.T) {
t.Errorf("failed decode %d: %v", i, err)
continue
}
if got, want := dest.uint64s, tc.uint64s; !reflect.DeepEqual(got, want) {
if got, want := dest.uint64s, tc.uint64s; !slices.Equal(got, want) {
t.Errorf("failed decode uint64s %d, got %v, want %v", i, got, want)
}
if got, want := dest.int64s, tc.int64s; !reflect.DeepEqual(got, want) {
if got, want := dest.int64s, tc.int64s; !slices.Equal(got, want) {
t.Errorf("failed decode int64s %d, got %v, want %v", i, got, want)
}
}

View File

@@ -7,6 +7,7 @@
package race
import (
"internal/abi"
"unsafe"
)
@@ -30,9 +31,21 @@ func Enable() {
// Read is a no-op stub (empty body; no race instrumentation in this build).
func Read(addr unsafe.Pointer) {
}

// ReadPC is a no-op stub.
func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr) {
}

// ReadObjectPC is a no-op stub.
func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) {
}

// Write is a no-op stub.
func Write(addr unsafe.Pointer) {
}

// WritePC is a no-op stub.
func WritePC(addr unsafe.Pointer, callerpc, pc uintptr) {
}

// WriteObjectPC is a no-op stub.
func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) {
}

// ReadRange is a no-op stub.
func ReadRange(addr unsafe.Pointer, len int) {
}

View File

@@ -7,48 +7,52 @@
package race
import (
"runtime"
"internal/abi"
"unsafe"
)
const Enabled = true
func Acquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
// Functions below pushed from runtime.
func Release(addr unsafe.Pointer) {
runtime.RaceRelease(addr)
}
//go:linkname Acquire
func Acquire(addr unsafe.Pointer)
func ReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
//go:linkname Release
func Release(addr unsafe.Pointer)
func Disable() {
runtime.RaceDisable()
}
//go:linkname ReleaseMerge
func ReleaseMerge(addr unsafe.Pointer)
func Enable() {
runtime.RaceEnable()
}
//go:linkname Disable
func Disable()
func Read(addr unsafe.Pointer) {
runtime.RaceRead(addr)
}
//go:linkname Enable
func Enable()
func Write(addr unsafe.Pointer) {
runtime.RaceWrite(addr)
}
//go:linkname Read
func Read(addr unsafe.Pointer)
func ReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
//go:linkname ReadPC
func ReadPC(addr unsafe.Pointer, callerpc, pc uintptr)
func WriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
//go:linkname ReadObjectPC
func ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr)
func Errors() int {
return runtime.RaceErrors()
}
//go:linkname Write
func Write(addr unsafe.Pointer)
//go:linkname WritePC
func WritePC(addr unsafe.Pointer, callerpc, pc uintptr)
//go:linkname WriteObjectPC
func WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr)
//go:linkname ReadRange
func ReadRange(addr unsafe.Pointer, len int)
//go:linkname WriteRange
func WriteRange(addr unsafe.Pointer, len int)
//go:linkname Errors
func Errors() int

View File

@@ -53,6 +53,9 @@ func Xchg64(ptr *uint64, new uint64) uint64
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8
//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr

View File

@@ -153,6 +153,14 @@ addloop:
MOVL CX, ret_hi+16(FP)
RET
// uint8 Xchg8(uint8 *ptr, uint8 new)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
// XCHG with a memory operand is implicitly locked on x86.
TEXT ·Xchg8(SB), NOSPLIT, $0-9
	MOVL	ptr+0(FP), BX
	MOVB	new+4(FP), AX
	XCHGB	AX, 0(BX)
	MOVB	AX, ret+8(FP)
	RET
TEXT ·Xchg(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX

View File

@@ -57,6 +57,9 @@ func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

View File

@@ -117,6 +117,18 @@ TEXT ·Xaddint64(SB), NOSPLIT, $0-24
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
JMP ·Xadd64(SB)
// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
// XCHG with a memory operand is implicitly locked on x86.
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVQ	ptr+0(FP), BX
	MOVB	new+8(FP), AX
	XCHGB	AX, 0(BX)
	MOVB	AX, ret+16(FP)
	RET
// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
// old := *ptr;

View File

@@ -74,6 +74,27 @@ func Xchg(addr *uint32, v uint32) uint32 {
}
}
//go:noescape
func Xchg8(addr *uint8, v uint8) uint8
// goXchg8 atomically exchanges the byte at addr with v and returns the
// byte previously stored there. It is implemented as a CAS loop on the
// aligned 32-bit word containing addr, presumably as a fallback when a
// native byte-exchange fast path is unavailable — confirm against the
// asm trampolines.
//
//go:nosplit
func goXchg8(addr *uint8, v uint8) uint8 {
	// Align down to 4 bytes and use 32-bit CAS.
	addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
	shift := (uintptr(unsafe.Pointer(addr)) & 3) * 8 // little endian
	word := uint32(v) << shift
	mask := uint32(0xFF) << shift
	for {
		old := *addr32 // Read the old 32-bit value
		// Clear the old 8 bits then insert the new value
		if Cas(addr32, old, (old&^mask)|word) {
			// Return the old 8-bit value
			return uint8((old & mask) >> shift)
		}
	}
}
//go:nosplit
func Xchguintptr(addr *uintptr, v uintptr) uintptr {
return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
@@ -159,12 +180,14 @@ func goStore64(addr *uint64, v uint64) {
addrLock(addr).unlock()
}
//go:noescape
func Or8(addr *uint8, v uint8)
//go:nosplit
func Or8(addr *uint8, v uint8) {
func goOr8(addr *uint8, v uint8) {
// Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr))
addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
word := uint32(v) << ((uaddr & 3) * 8) // little endian
addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
for {
old := *addr32
if Cas(addr32, old, old|word) {
@@ -173,13 +196,15 @@ func Or8(addr *uint8, v uint8) {
}
}
//go:noescape
func And8(addr *uint8, v uint8)
//go:nosplit
func And8(addr *uint8, v uint8) {
func goAnd8(addr *uint8, v uint8) {
// Align down to 4 bytes and use 32-bit CAS.
uaddr := uintptr(unsafe.Pointer(addr))
addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
word := uint32(v) << ((uaddr & 3) * 8) // little endian
mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
addr32 := (*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(addr)) &^ 3))
word := uint32(v) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
mask := uint32(0xFF) << ((uintptr(unsafe.Pointer(addr)) & 3) * 8) // little endian
word |= ^mask
for {
old := *addr32

View File

@@ -228,6 +228,59 @@ store64loop:
DMB MB_ISH
RET
TEXT armAnd8<>(SB),NOSPLIT,$0-5
// addr is already in R1
MOVB v+4(FP), R2
and8loop:
LDREXB (R1), R6
DMB MB_ISHST
AND R2, R6
STREXB R6, (R1), R0
CMP $0, R0
BNE and8loop
DMB MB_ISH
RET
TEXT armOr8<>(SB),NOSPLIT,$0-5
// addr is already in R1
MOVB v+4(FP), R2
or8loop:
LDREXB (R1), R6
DMB MB_ISHST
ORR R2, R6
STREXB R6, (R1), R0
CMP $0, R0
BNE or8loop
DMB MB_ISH
RET
TEXT armXchg8<>(SB),NOSPLIT,$0-9
	// addr is already in R1
	MOVB	v+4(FP), R2
xchg8loop:
	// LL/SC loop: load-exclusive the current byte, then try to
	// store-exclusive the new byte; retry if the reservation was lost.
	LDREXB	(R1), R6
	DMB	MB_ISHST
	STREXB	R2, (R1), R0
	CMP	$0, R0
	BNE	xchg8loop
	// Full barrier so the exchange is ordered before the return.
	DMB	MB_ISH
	MOVB	R6, ret+8(FP)
	RET
// The following functions all panic if their address argument isn't
// 8-byte aligned. Since we're calling back into Go code to do this,
// we have to cooperate with stack unwinding. In the normal case, the
@@ -310,3 +363,45 @@ TEXT ·Store64(SB),NOSPLIT,$-4-12
JMP ·goStore64(SB)
#endif
JMP armStore64<>(SB)
TEXT ·And8(SB),NOSPLIT,$-4-5
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
// Uses STREXB/LDREXB that is armv6k or later.
// For simplicity we only enable this on armv7.
#ifndef GOARM_7
MOVB internalcpu·ARM+const_offsetARMHasV7Atomics(SB), R11
CMP $1, R11
BEQ 2(PC)
JMP ·goAnd8(SB)
#endif
JMP armAnd8<>(SB)
TEXT ·Or8(SB),NOSPLIT,$-4-5
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
// Uses STREXB/LDREXB that is armv6k or later.
// For simplicity we only enable this on armv7.
#ifndef GOARM_7
MOVB internalcpu·ARM+const_offsetARMHasV7Atomics(SB), R11
CMP $1, R11
BEQ 2(PC)
JMP ·goOr8(SB)
#endif
JMP armOr8<>(SB)
TEXT ·Xchg8(SB),NOSPLIT,$-4-9
	NO_LOCAL_POINTERS
	MOVW	addr+0(FP), R1
	// Uses STREXB/LDREXB that is armv6k or later.
	// For simplicity we only enable this on armv7.
#ifndef GOARM_7
	// Not compiled for armv7: check the CPU feature flag at run time
	// and fall back to the CAS-based Go implementation if absent.
	MOVB	internalcpu·ARM+const_offsetARMHasV7Atomics(SB), R11
	CMP	$1, R11
	BEQ	2(PC)
	JMP	·goXchg8(SB)
#endif
	JMP	armXchg8<>(SB)

View File

@@ -24,6 +24,9 @@ func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

View File

@@ -120,6 +120,30 @@ TEXT ·Store64(SB), NOSPLIT, $0-16
STLR R1, (R0)
RET
// uint8 Xchg(ptr *uint8, new uint8)
// Atomically:
// old := *ptr;
// *ptr = new;
// return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R0
	MOVB	new+8(FP), R1
#ifndef GOARM64_LSE
	// LSE not guaranteed at compile time: branch to the LL/SC loop
	// unless the CPU reports the atomics (LSE) feature.
	MOVBU	internalcpu·ARM64+const_offsetARM64HasATOMICS(SB), R4
	CBZ	R4, load_store_loop
#endif
	// Single-instruction byte swap with acquire+release ordering.
	SWPALB	R1, (R0), R2
	MOVB	R2, ret+16(FP)
	RET
#ifndef GOARM64_LSE
load_store_loop:
	// LL/SC fallback: load-acquire exclusive, store-release exclusive,
	// retry until the store-exclusive succeeds.
	LDAXRB	(R0), R2
	STLXRB	R1, (R0), R3
	CBNZ	R3, load_store_loop
	MOVB	R2, ret+16(FP)
	RET
#endif
// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
// old := *ptr;

View File

@@ -6,7 +6,15 @@
package atomic
import "unsafe"
import (
"internal/cpu"
"unsafe"
)
// Offsets of the loong64 CPU feature flags consulted by the assembly
// in this package (exported to it through go_asm.h constants).
const (
	offsetLOONG64HasLAMCAS = unsafe.Offsetof(cpu.Loong64.HasLAMCAS)
	offsetLoong64HasLAM_BH = unsafe.Offsetof(cpu.Loong64.HasLAM_BH)
)
//go:noescape
func Xadd(ptr *uint32, delta int32) uint32
@@ -17,6 +25,9 @@ func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

View File

@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "go_asm.h"
#include "textflag.h"
// bool cas(uint32 *ptr, uint32 old, uint32 new)
@@ -15,18 +16,32 @@ TEXT ·Cas(SB), NOSPLIT, $0-17
MOVV ptr+0(FP), R4
MOVW old+8(FP), R5
MOVW new+12(FP), R6
DBAR
MOVBU internalcpu·Loong64+const_offsetLOONG64HasLAMCAS(SB), R8
BEQ R8, cas_again
MOVV R5, R7 // backup old value
AMCASDBW R6, (R4), R5
BNE R7, R5, cas_fail0
MOVV $1, R4
MOVB R4, ret+16(FP)
RET
cas_fail0:
MOVB R0, ret+16(FP)
RET
// Implemented using the ll-sc instruction pair
DBAR $0x14 // LoadAcquire barrier
cas_again:
MOVV R6, R7
LL (R4), R8
BNE R5, R8, cas_fail
BNE R5, R8, cas_fail1
SC R7, (R4)
BEQ R7, cas_again
MOVV $1, R4
MOVB R4, ret+16(FP)
DBAR
DBAR $0x12 // StoreRelease barrier
RET
cas_fail:
cas_fail1:
MOVV $0, R4
JMP -4(PC)
@@ -42,21 +57,41 @@ TEXT ·Cas64(SB), NOSPLIT, $0-25
MOVV ptr+0(FP), R4
MOVV old+8(FP), R5
MOVV new+16(FP), R6
DBAR
MOVBU internalcpu·Loong64+const_offsetLOONG64HasLAMCAS(SB), R8
BEQ R8, cas64_again
MOVV R5, R7 // backup old value
AMCASDBV R6, (R4), R5
BNE R7, R5, cas64_fail0
MOVV $1, R4
MOVB R4, ret+24(FP)
RET
cas64_fail0:
MOVB R0, ret+24(FP)
RET
// Implemented using the ll-sc instruction pair
DBAR $0x14
cas64_again:
MOVV R6, R7
LLV (R4), R8
BNE R5, R8, cas64_fail
BNE R5, R8, cas64_fail1
SCV R7, (R4)
BEQ R7, cas64_again
MOVV $1, R4
MOVB R4, ret+24(FP)
DBAR
DBAR $0x12
RET
cas64_fail:
cas64_fail1:
MOVV $0, R4
JMP -4(PC)
TEXT ·Casint32(SB),NOSPLIT,$0-17
JMP ·Cas(SB)
TEXT ·Casint64(SB),NOSPLIT,$0-25
JMP ·Cas64(SB)
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
JMP ·Cas64(SB)
@@ -78,6 +113,9 @@ TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
TEXT ·Loadint64(SB), NOSPLIT, $0-16
JMP ·Load64(SB)
TEXT ·Xaddint32(SB),NOSPLIT,$0-20
JMP ·Xadd(SB)
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
JMP ·Xadd64(SB)
@@ -91,65 +129,92 @@ TEXT ·Xaddint64(SB), NOSPLIT, $0-24
TEXT ·Casp1(SB), NOSPLIT, $0-25
JMP ·Cas64(SB)
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
// *val += delta;
// return *val;
TEXT ·Xadd(SB), NOSPLIT, $0-20
MOVV ptr+0(FP), R4
MOVW delta+8(FP), R5
DBAR
LL (R4), R6
ADDU R6, R5, R7
MOVV R7, R6
SC R7, (R4)
BEQ R7, -4(PC)
MOVW R6, ret+16(FP)
DBAR
AMADDDBW R5, (R4), R6
ADDV R6, R5, R4
MOVW R4, ret+16(FP)
RET
// func Xadd64(ptr *uint64, delta int64) uint64
TEXT ·Xadd64(SB), NOSPLIT, $0-24
MOVV ptr+0(FP), R4
MOVV delta+8(FP), R5
DBAR
LLV (R4), R6
ADDVU R6, R5, R7
MOVV R7, R6
SCV R7, (R4)
BEQ R7, -4(PC)
MOVV R6, ret+16(FP)
DBAR
AMADDDBV R5, (R4), R6
ADDV R6, R5, R4
MOVV R4, ret+16(FP)
RET
// uint8 Xchg8(ptr *uint8, new uint8)
// Atomically:
// old := *ptr;
// *ptr = new;
// return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R4
	MOVBU	new+8(FP), R5
	// No byte-wide LL/SC here: emulate the byte exchange by doing an
	// LL/SC loop on the aligned 32-bit word containing the target byte.
	// R6 = ((ptr & 3) * 8)
	AND	$3, R4, R6
	SLLV	$3, R6
	// R7 = ((0xFF) << R6) ^ (-1)
	MOVV	$0xFF, R8
	SLLV	R6, R8, R7
	XOR	$-1, R7
	// R4 = ptr & (~3)
	MOVV	$~3, R8
	AND	R8, R4
	// R5 = ((val) << R6)
	SLLV	R6, R5
	DBAR	$0x14 // LoadAcquire barrier
_xchg8_again:
	// Clear the target byte in the loaded word, OR in the new byte,
	// and retry until the store-conditional succeeds.
	LL	(R4), R8
	MOVV	R8, R9 // backup old val
	AND	R7, R8
	OR	R5, R8
	SC	R8, (R4)
	BEQ	R8, _xchg8_again
	DBAR	$0x12 // StoreRelease barrier
	// Shift the saved old word down and return just the old byte.
	SRLV	R6, R9, R9
	MOVBU	R9, ret+16(FP)
	RET
// func Xchg(ptr *uint32, new uint32) uint32
TEXT ·Xchg(SB), NOSPLIT, $0-20
MOVV ptr+0(FP), R4
MOVW new+8(FP), R5
DBAR
MOVV R5, R6
LL (R4), R7
SC R6, (R4)
BEQ R6, -3(PC)
MOVW R7, ret+16(FP)
DBAR
AMSWAPDBW R5, (R4), R6
MOVW R6, ret+16(FP)
RET
// func Xchg64(ptr *uint64, new uint64) uint64
TEXT ·Xchg64(SB), NOSPLIT, $0-24
MOVV ptr+0(FP), R4
MOVV new+8(FP), R5
DBAR
MOVV R5, R6
LLV (R4), R7
SCV R6, (R4)
BEQ R6, -3(PC)
MOVV R7, ret+16(FP)
DBAR
AMSWAPDBV R5, (R4), R6
MOVV R6, ret+16(FP)
RET
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
JMP ·Xchg64(SB)
// func Xchgint32(ptr *int32, new int32) int32
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
JMP ·Xchg(SB)
// func Xchgint64(ptr *int64, new int64) int64
TEXT ·Xchgint64(SB), NOSPLIT, $0-24
JMP ·Xchg64(SB)
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
JMP ·Store64(SB)
@@ -165,147 +230,105 @@ TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
TEXT ·Store(SB), NOSPLIT, $0-12
MOVV ptr+0(FP), R4
MOVW val+8(FP), R5
DBAR
MOVW R5, 0(R4)
DBAR
AMSWAPDBW R5, (R4), R0
RET
TEXT ·Store8(SB), NOSPLIT, $0-9
MOVV ptr+0(FP), R4
MOVB val+8(FP), R5
DBAR
MOVBU internalcpu·Loong64+const_offsetLoong64HasLAM_BH(SB), R6
BEQ R6, _legacy_store8_
AMSWAPDBB R5, (R4), R0
RET
_legacy_store8_:
// StoreRelease barrier
DBAR $0x12
MOVB R5, 0(R4)
DBAR
DBAR $0x18
RET
TEXT ·Store64(SB), NOSPLIT, $0-16
MOVV ptr+0(FP), R4
MOVV val+8(FP), R5
DBAR
MOVV R5, 0(R4)
DBAR
AMSWAPDBV R5, (R4), R0
RET
// void Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
MOVV ptr+0(FP), R4
MOVBU val+8(FP), R5
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R6 = ptr & (~3)
MOVV $~3, R6
AND R4, R6
// R7 = ((ptr & 3) * 8)
AND $3, R4, R7
SLLV $3, R7
// Shift val for aligned ptr. R5 = val << R4
// R5 = val << R7
SLLV R7, R5
DBAR
LL (R6), R7
OR R5, R7
SC R7, (R6)
BEQ R7, -4(PC)
DBAR
AMORDBW R5, (R6), R0
RET
// void And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-9
MOVV ptr+0(FP), R4
MOVBU val+8(FP), R5
// Align ptr down to 4 bytes so we can use 32-bit load/store.
// R6 = ptr & (~3)
MOVV $~3, R6
AND R4, R6
// R7 = ((ptr & 3) * 8)
AND $3, R4, R7
SLLV $3, R7
// Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7)
MOVV $0xFF, R8
SLLV R7, R5
SLLV R7, R8
NOR R0, R8
OR R8, R5
DBAR
LL (R6), R7
AND R5, R7
SC R7, (R6)
BEQ R7, -4(PC)
DBAR
// R5 = ((val ^ 0xFF) << R7) ^ (-1)
XOR $255, R5
SLLV R7, R5
XOR $-1, R5
AMANDDBW R5, (R6), R0
RET
// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
MOVV ptr+0(FP), R4
MOVW val+8(FP), R5
DBAR
LL (R4), R6
OR R5, R6
SC R6, (R4)
BEQ R6, -4(PC)
DBAR
AMORDBW R5, (R4), R0
RET
// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-12
MOVV ptr+0(FP), R4
MOVW val+8(FP), R5
DBAR
LL (R4), R6
AND R5, R6
SC R6, (R4)
BEQ R6, -4(PC)
DBAR
AMANDDBW R5, (R4), R0
RET
// func Or32(addr *uint32, v uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-20
MOVV ptr+0(FP), R4
MOVW val+8(FP), R5
DBAR
LL (R4), R6
OR R5, R6, R7
SC R7, (R4)
BEQ R7, -4(PC)
DBAR
MOVW R6, ret+16(FP)
AMORDBW R5, (R4), R6
MOVW R6, ret+16(FP)
RET
// func And32(addr *uint32, v uint32) old uint32
TEXT ·And32(SB), NOSPLIT, $0-20
MOVV ptr+0(FP), R4
MOVW val+8(FP), R5
DBAR
LL (R4), R6
AND R5, R6, R7
SC R7, (R4)
BEQ R7, -4(PC)
DBAR
MOVW R6, ret+16(FP)
AMANDDBW R5, (R4), R6
MOVW R6, ret+16(FP)
RET
// func Or64(addr *uint64, v uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-24
MOVV ptr+0(FP), R4
MOVV val+8(FP), R5
DBAR
LLV (R4), R6
OR R5, R6, R7
SCV R7, (R4)
BEQ R7, -4(PC)
DBAR
MOVV R6, ret+16(FP)
AMORDBV R5, (R4), R6
MOVV R6, ret+16(FP)
RET
// func And64(addr *uint64, v uint64) old uint64
TEXT ·And64(SB), NOSPLIT, $0-24
MOVV ptr+0(FP), R4
MOVV val+8(FP), R5
DBAR
LLV (R4), R6
AND R5, R6, R7
SCV R7, (R4)
BEQ R7, -4(PC)
DBAR
MOVV R6, ret+16(FP)
AMANDDBV R5, (R4), R6
MOVV R6, ret+16(FP)
RET
// func Anduintptr(addr *uintptr, v uintptr) old uintptr
@@ -319,38 +342,30 @@ TEXT ·Oruintptr(SB), NOSPLIT, $0-24
// uint32 internalruntimeatomic·Load(uint32 volatile* ptr)
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
MOVV ptr+0(FP), R19
DBAR
MOVWU 0(R19), R19
DBAR
DBAR $0x14 // LoadAcquire barrier
MOVW R19, ret+8(FP)
RET
// uint8 internalruntimeatomic·Load8(uint8 volatile* ptr)
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
MOVV ptr+0(FP), R19
DBAR
MOVBU 0(R19), R19
DBAR
DBAR $0x14
MOVB R19, ret+8(FP)
RET
// uint64 internalruntimeatomic·Load64(uint64 volatile* ptr)
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
MOVV ptr+0(FP), R19
DBAR
MOVV 0(R19), R19
DBAR
DBAR $0x14
MOVV R19, ret+8(FP)
RET
// void *internalruntimeatomic·Loadp(void *volatile *ptr)
TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
MOVV ptr+0(FP), R19
DBAR
MOVV 0(R19), R19
DBAR
MOVV R19, ret+8(FP)
RET
JMP ·Load64(SB)
// uint32 internalruntimeatomic·LoadAcq(uint32 volatile* ptr)
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12

View File

@@ -17,6 +17,9 @@ func Xadd64(ptr *uint64, delta int64) uint64
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
//go:noescape
func Xchg8(ptr *uint8, new uint8) uint8
//go:noescape
func Xchg(ptr *uint32, new uint32) uint32

View File

@@ -236,6 +236,22 @@ TEXT ·Xadd64(SB), NOSPLIT, $0-24
MOVD R3, ret+16(FP)
RET
// uint8 Xchg(ptr *uint8, new uint8)
// Atomically:
// old := *ptr;
// *ptr = new;
// return old;
TEXT ·Xchg8(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R4
	MOVB	new+8(FP), R5
	// Barrier before the update (release ordering).
	LWSYNC
	LBAR	(R4), R3	// load byte and reserve
	STBCCC	R5, (R4)	// store-conditional the new byte
	BNE	-2(PC)		// reservation lost: retry the LBAR/STBCCC pair
	// Barrier after the update (acquire ordering).
	ISYNC
	MOVB	R3, ret+16(FP)
	RET
// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
// old := *ptr;

View File

@@ -43,6 +43,22 @@ func BenchmarkAtomicStore(b *testing.B) {
}
}
// BenchmarkAtomicLoad8 measures the cost of an uncontended atomic
// 8-bit load.
func BenchmarkAtomicLoad8(b *testing.B) {
	var x uint8
	sink = &x // publish x so the loads cannot be optimized away
	for i := 0; i < b.N; i++ {
		atomic.Load8(&x)
	}
}
// BenchmarkAtomicStore8 measures the cost of an uncontended atomic
// 8-bit store.
func BenchmarkAtomicStore8(b *testing.B) {
	var x uint8
	sink = &x // publish x so the stores cannot be optimized away
	for i := 0; i < b.N; i++ {
		atomic.Store8(&x, 0)
	}
}
func BenchmarkAnd8(b *testing.B) {
var x [512]uint8 // give byte its own cache line
sink = &x

View File

@@ -0,0 +1,59 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build 386 || amd64 || arm || arm64 || loong64 || ppc64 || ppc64le
package atomic_test
import (
"internal/runtime/atomic"
"testing"
)
// TestXchg8 cross-checks atomic.Xchg8 against a plain, non-atomic
// model at every byte offset of a small array: the returned old value
// must match the model, and neighboring bytes must not be clobbered.
func TestXchg8(t *testing.T) {
	var got [16]uint8
	for idx := range got {
		got[idx] = uint8(idx + 50)
	}
	want := got

	// Compare behavior against non-atomic implementation. Expect the operation
	// to work at any byte offset and to not clobber neighboring values.
	for idx := range got {
		v := uint8(idx + 100)
		oldAtomic := atomic.Xchg8(&got[idx], v)
		oldModel := want[idx]
		want[idx] = v
		if oldAtomic != oldModel {
			t.Errorf("atomic.Xchg8(a[%d]); %d != %d", idx, oldAtomic, oldModel)
		}
		if got != want {
			t.Errorf("after atomic.Xchg8(a[%d]); %d != %d", idx, got, want)
		}
		if t.Failed() {
			break
		}
	}
}
// BenchmarkXchg8 measures an uncontended atomic byte exchange.
func BenchmarkXchg8(b *testing.B) {
	var x [512]uint8 // give byte its own cache line
	sink = &x        // publish x so the exchanges cannot be optimized away
	for i := 0; i < b.N; i++ {
		atomic.Xchg8(&x[255], uint8(i))
	}
}
// BenchmarkXchg8Parallel measures atomic byte exchange with all
// benchmark goroutines hammering the same byte (worst-case contention).
func BenchmarkXchg8Parallel(b *testing.B) {
	var x [512]uint8 // give byte its own cache line
	sink = &x        // publish x so the exchanges cannot be optimized away
	b.RunParallel(func(pb *testing.PB) {
		i := uint8(0)
		for pb.Next() {
			atomic.Xchg8(&x[255], i)
			i++
		}
	})
}

View File

@@ -0,0 +1,51 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !goexperiment.swissmap
// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to
// construct a swissmap table for running the tests in this package.
package maps
import (
"internal/abi"
"unsafe"
)
// instantiatedGroup is a concrete Go type mirroring the layout of a
// swissmap group for key type K and elem type V: a control group
// followed by abi.SwissMapGroupSlots key/elem slots.
type instantiatedGroup[K comparable, V any] struct {
	ctrls ctrlGroup
	slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V]
}

// instantiatedSlot is a single key/elem pair within a group.
type instantiatedSlot[K comparable, V any] struct {
	key  K
	elem V
}
// newTestMapType builds a *abi.SwissMapType for map[K]V on builds where
// the compiler emits old (bucket-based) map metadata. It borrows Key,
// Elem, and Hasher from the old map type descriptor and derives the
// group layout from the instantiated generic types in this file, so
// swissmap tables can still be constructed for tests.
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
	var m map[K]V
	mTyp := abi.TypeOf(m)
	// The descriptor behind map[K]V is an old-map type on this build.
	omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))

	var grp instantiatedGroup[K, V]
	var slot instantiatedSlot[K, V]

	mt := &abi.SwissMapType{
		Key:       omt.Key,
		Elem:      omt.Elem,
		Group:     abi.TypeOf(grp),
		Hasher:    omt.Hasher,
		SlotSize:  unsafe.Sizeof(slot),
		GroupSize: unsafe.Sizeof(grp),
		ElemOff:   unsafe.Offsetof(slot.elem),
	}
	// Carry over the flag bits the swissmap implementation consults.
	if omt.NeedKeyUpdate() {
		mt.Flags |= abi.SwissMapNeedKeyUpdate
	}
	if omt.HashMightPanic() {
		mt.Flags |= abi.SwissMapHashMightPanic
	}
	return mt
}

View File

@@ -0,0 +1,19 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build goexperiment.swissmap
package maps
import (
"internal/abi"
"unsafe"
)
// newTestMapType returns the compiler-generated *abi.SwissMapType for
// map[K]V; on swissmap builds the type descriptor already is one.
func newTestMapType[K comparable, V any]() *abi.SwissMapType {
	var zero map[K]V
	return (*abi.SwissMapType)(unsafe.Pointer(abi.TypeOf(zero)))
}

View File

@@ -0,0 +1,124 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package maps
import (
"internal/abi"
"unsafe"
)
type CtrlGroup = ctrlGroup
const DebugLog = debugLog
var AlignUpPow2 = alignUpPow2
const MaxTableCapacity = maxTableCapacity
const MaxAvgGroupLoad = maxAvgGroupLoad
// This isn't equivalent to runtime.maxAlloc. It is fine for basic testing but
// we can't properly test hint alloc overflows with this.
const maxAllocTest = 1 << 30
// NewTestMap creates a Map for map[K]V sized for hint entries and also
// returns its type descriptor. It uses maxAllocTest as the allocation
// cap, since tests cannot reach the real runtime maxAlloc.
func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
	mt := newTestMapType[K, V]()
	return NewMap(mt, hint, nil, maxAllocTest), mt
}
// TableCount reports the number of directory entries; a small map
// (no directory) reports 0.
func (m *Map) TableCount() int {
	if n := m.dirLen; n > 0 {
		return n
	}
	return 0
}
// GroupCount returns the total group count, summed across all tables,
// counting each table once even if several directory entries share it.
func (m *Map) GroupCount() uint64 {
	if m.dirLen <= 0 {
		// Small map: no directory. Either empty (no groups at all) or
		// a single inline group hung off dirPtr.
		if m.dirPtr == nil {
			return 0
		}
		return 1
	}

	var n uint64
	var lastTab *table
	for i := range m.dirLen {
		t := m.directoryAt(uintptr(i))
		if t == lastTab {
			// Consecutive directory entries can reference the same
			// table; skip duplicates so each table is counted once.
			continue
		}
		lastTab = t
		n += t.groups.lengthMask + 1
	}
	return n
}
// KeyFromFullGroup returns a key from a group containing no empty slots.
//
// Returns nil if there are no full groups.
// Returns nil if a group is full but contains entirely deleted slots.
// Returns nil if the map is small.
func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
	if m.dirLen <= 0 {
		return nil
	}

	var lastTab *table
	for i := range m.dirLen {
		t := m.directoryAt(uintptr(i))
		if t == lastTab {
			// Skip directory entries aliasing an already-visited table.
			continue
		}
		lastTab = t

		for i := uint64(0); i <= t.groups.lengthMask; i++ {
			g := t.groups.group(typ, i)
			match := g.ctrls().matchEmpty()
			if match != 0 {
				// Group has at least one empty slot, so it isn't full.
				continue
			}

			// All full or deleted slots.
			for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
				if g.ctrls().get(j) == ctrlDeleted {
					// Deleted slot holds no usable key; keep scanning.
					continue
				}
				slotKey := g.key(typ, j)
				if typ.IndirectKey() {
					// Key is stored indirectly; dereference to get it.
					slotKey = *((*unsafe.Pointer)(slotKey))
				}
				return slotKey
			}
		}
	}

	return nil
}
// TableFor returns the table responsible for key.
// Returns nil if the map is small.
func (m *Map) TableFor(typ *abi.SwissMapType, key unsafe.Pointer) *table {
	if m.dirLen <= 0 {
		return nil
	}

	// Hash the key with the map's seed and index into the directory.
	hash := typ.Hasher(key, m.seed)
	idx := m.directoryIndex(hash)
	return m.directoryAt(idx)
}
// GrowthLeft exposes the table's remaining growth budget for tests.
func (t *table) GrowthLeft() uint64 {
	return uint64(t.growthLeft)
}
// GroupsStart returns the start address of the groups array.
func (t *table) GroupsStart() unsafe.Pointer {
	return t.groups.data
}
// GroupsLength returns the length of the groups array.
func (t *table) GroupsLength() uintptr {
	// lengthMask is (power-of-two length - 1), so length is mask + 1.
	return uintptr(t.groups.lengthMask + 1)
}

Some files were not shown because too many files have changed in this diff Show More