runtime: map

c/c.go (3 changed lines)
@@ -66,6 +66,9 @@ func Free(ptr Pointer)
//go:linkname Memcpy C.memcpy
func Memcpy(dst, src Pointer, n uintptr) Pointer

//go:linkname Memmove C.memmove
func Memmove(dst, src Pointer, n uintptr) Pointer

//go:linkname Memset C.memset
func Memset(s Pointer, c Int, n uintptr) Pointer
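// Editor's sketch of these bindings in use (assumes c.Malloc exists in
// this package alongside the Free shown in the hunk context above):
//
//	p := c.Malloc(64)    // raw C allocation
//	c.Memset(p, 0, 64)   // zero the block
//	c.Memcpy(dst, p, 64) // copy into some destination buffer
//	c.Free(p)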
@@ -170,6 +170,24 @@ type MapType struct {
	Flags uint32
}

// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
	return mt.Flags&1 != 0
}
func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
	return mt.Flags&2 != 0
}
func (mt *MapType) ReflexiveKey() bool { // true if k==k for all keys
	return mt.Flags&4 != 0
}
func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
	return mt.Flags&8 != 0
}
func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
	return mt.Flags&16 != 0
}
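// Editor's sketch: each accessor above tests one bit of Flags. With
// hypothetical named constants the encoding reads:
//
//	const (
//		flagIndirectKey    = 1 << 0
//		flagIndirectElem   = 1 << 1
//		flagReflexiveKey   = 1 << 2
//		flagNeedKeyUpdate  = 1 << 3
//		flagHashMightPanic = 1 << 4
//	)
//
//	func hashMightPanic(flags uint32) bool { return flags&flagHashMightPanic != 0 }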
type PtrType struct {
	Type
	Elem *Type // pointer element (pointed at) type

internal/runtime/error.go (new file, 334 lines)
@@ -0,0 +1,334 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

/*
import "internal/bytealg"

// The Error interface identifies a run time error.
type Error interface {
	error

	// RuntimeError is a no-op function but
	// serves to distinguish types that are run time
	// errors from ordinary errors: a type is a
	// run time error if it has a RuntimeError method.
	RuntimeError()
}

// A TypeAssertionError explains a failed type assertion.
type TypeAssertionError struct {
	_interface *_type
	concrete *_type
	asserted *_type
	missingMethod string // one method needed by Interface, missing from Concrete
}

func (*TypeAssertionError) RuntimeError() {}

func (e *TypeAssertionError) Error() string {
	inter := "interface"
	if e._interface != nil {
		inter = toRType(e._interface).string()
	}
	as := toRType(e.asserted).string()
	if e.concrete == nil {
		return "interface conversion: " + inter + " is nil, not " + as
	}
	cs := toRType(e.concrete).string()
	if e.missingMethod == "" {
		msg := "interface conversion: " + inter + " is " + cs + ", not " + as
		if cs == as {
			// provide slightly clearer error message
			if toRType(e.concrete).pkgpath() != toRType(e.asserted).pkgpath() {
				msg += " (types from different packages)"
			} else {
				msg += " (types from different scopes)"
			}
		}
		return msg
	}
	return "interface conversion: " + cs + " is not " + as +
		": missing method " + e.missingMethod
}

// itoa converts val to a decimal representation. The result is
// written somewhere within buf and the location of the result is returned.
// buf must be at least 20 bytes.
//
//go:nosplit
func itoa(buf []byte, val uint64) []byte {
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}
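// Editor's sketch of the calling convention: digits are written at the
// tail of buf and the returned slice aliases that tail:
//
//	var buf [20]byte
//	s := itoa(buf[:], 42) // string(s) == "42"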

// An errorString represents a runtime error described by a single string.
type errorString string

func (e errorString) RuntimeError() {}

func (e errorString) Error() string {
	return "runtime error: " + string(e)
}

type errorAddressString struct {
	msg string // error message
	addr uintptr // memory address where the error occurred
}

func (e errorAddressString) RuntimeError() {}

func (e errorAddressString) Error() string {
	return "runtime error: " + e.msg
}

// Addr returns the memory address where a fault occurred.
// The address provided is best-effort.
// The veracity of the result may depend on the platform.
// Errors providing this method will only be returned as
// a result of using runtime/debug.SetPanicOnFault.
func (e errorAddressString) Addr() uintptr {
	return e.addr
}
*/

// plainError represents a runtime error described by a string without
// the prefix "runtime error: " after invoking errorString.Error().
// See Issue #14965.
type plainError string

func (e plainError) RuntimeError() {}

func (e plainError) Error() string {
	return string(e)
}
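// Editor's sketch of the distinction between the two error kinds:
//
//	errorString("index out of range").Error()            // "runtime error: index out of range"
//	plainError("assignment to entry in nil map").Error() // "assignment to entry in nil map"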

/*
// A boundsError represents an indexing or slicing operation gone wrong.
type boundsError struct {
	x int64
	y int
	// Values in an index or slice expression can be signed or unsigned.
	// That means we'd need 65 bits to encode all possible indexes, from -2^63 to 2^64-1.
	// Instead, we keep track of whether x should be interpreted as signed or unsigned.
	// y is known to be nonnegative and to fit in an int.
	signed bool
	code boundsErrorCode
}

type boundsErrorCode uint8

const (
	boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed

	boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
	boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
	boundsSliceB    // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)

	boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
	boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
	boundsSlice3B    // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
	boundsSlice3C    // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)

	boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
	// Note: in the above, len(s) and cap(s) are stored in y
)

// boundsErrorFmts provide error text for various out-of-bounds panics.
// Note: if you change these strings, you should adjust the size of the buffer
// in boundsError.Error below as well.
var boundsErrorFmts = [...]string{
	boundsIndex:      "index out of range [%x] with length %y",
	boundsSliceAlen:  "slice bounds out of range [:%x] with length %y",
	boundsSliceAcap:  "slice bounds out of range [:%x] with capacity %y",
	boundsSliceB:     "slice bounds out of range [%x:%y]",
	boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
	boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
	boundsSlice3B:    "slice bounds out of range [:%x:%y]",
	boundsSlice3C:    "slice bounds out of range [%x:%y:]",
	boundsConvert:    "cannot convert slice with length %y to array or pointer to array with length %x",
}

// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{
	boundsIndex:      "index out of range [%x]",
	boundsSliceAlen:  "slice bounds out of range [:%x]",
	boundsSliceAcap:  "slice bounds out of range [:%x]",
	boundsSliceB:     "slice bounds out of range [%x:]",
	boundsSlice3Alen: "slice bounds out of range [::%x]",
	boundsSlice3Acap: "slice bounds out of range [::%x]",
	boundsSlice3B:    "slice bounds out of range [:%x:]",
	boundsSlice3C:    "slice bounds out of range [%x::]",
}

func (e boundsError) RuntimeError() {}

func appendIntStr(b []byte, v int64, signed bool) []byte {
	if signed && v < 0 {
		b = append(b, '-')
		v = -v
	}
	var buf [20]byte
	b = append(b, itoa(buf[:], uint64(v))...)
	return b
}

func (e boundsError) Error() string {
	fmt := boundsErrorFmts[e.code]
	if e.signed && e.x < 0 {
		fmt = boundsNegErrorFmts[e.code]
	}
	// max message length is 99: "runtime error: slice bounds out of range [::%x] with capacity %y"
	// x can be at most 20 characters. y can be at most 19.
	b := make([]byte, 0, 100)
	b = append(b, "runtime error: "...)
	for i := 0; i < len(fmt); i++ {
		c := fmt[i]
		if c != '%' {
			b = append(b, c)
			continue
		}
		i++
		switch fmt[i] {
		case 'x':
			b = appendIntStr(b, e.x, e.signed)
		case 'y':
			b = appendIntStr(b, int64(e.y), true)
		}
	}
	return string(b)
}
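// Editor's sketch of a rendered message, given the types above:
//
//	e := boundsError{x: 5, y: 3, signed: true, code: boundsIndex}
//	e.Error() // "runtime error: index out of range [5] with length 3"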

type stringer interface {
	String() string
}

// printany prints an argument passed to panic.
// If panic is called with a value that has a String or Error method,
// it has already been converted into a string by preprintpanics.
func printany(i any) {
	switch v := i.(type) {
	case nil:
		print("nil")
	case bool:
		print(v)
	case int:
		print(v)
	case int8:
		print(v)
	case int16:
		print(v)
	case int32:
		print(v)
	case int64:
		print(v)
	case uint:
		print(v)
	case uint8:
		print(v)
	case uint16:
		print(v)
	case uint32:
		print(v)
	case uint64:
		print(v)
	case uintptr:
		print(v)
	case float32:
		print(v)
	case float64:
		print(v)
	case complex64:
		print(v)
	case complex128:
		print(v)
	case string:
		print(v)
	default:
		printanycustomtype(i)
	}
}

func printanycustomtype(i any) {
	eface := efaceOf(&i)
	typestring := toRType(eface._type).string()

	switch eface._type.Kind_ {
	case kindString:
		print(typestring, `("`, *(*string)(eface.data), `")`)
	case kindBool:
		print(typestring, "(", *(*bool)(eface.data), ")")
	case kindInt:
		print(typestring, "(", *(*int)(eface.data), ")")
	case kindInt8:
		print(typestring, "(", *(*int8)(eface.data), ")")
	case kindInt16:
		print(typestring, "(", *(*int16)(eface.data), ")")
	case kindInt32:
		print(typestring, "(", *(*int32)(eface.data), ")")
	case kindInt64:
		print(typestring, "(", *(*int64)(eface.data), ")")
	case kindUint:
		print(typestring, "(", *(*uint)(eface.data), ")")
	case kindUint8:
		print(typestring, "(", *(*uint8)(eface.data), ")")
	case kindUint16:
		print(typestring, "(", *(*uint16)(eface.data), ")")
	case kindUint32:
		print(typestring, "(", *(*uint32)(eface.data), ")")
	case kindUint64:
		print(typestring, "(", *(*uint64)(eface.data), ")")
	case kindUintptr:
		print(typestring, "(", *(*uintptr)(eface.data), ")")
	case kindFloat32:
		print(typestring, "(", *(*float32)(eface.data), ")")
	case kindFloat64:
		print(typestring, "(", *(*float64)(eface.data), ")")
	case kindComplex64:
		print(typestring, *(*complex64)(eface.data))
	case kindComplex128:
		print(typestring, *(*complex128)(eface.data))
	default:
		print("(", typestring, ") ", eface.data)
	}
}

// panicwrap generates a panic for a call to a wrapped value method
// with a nil pointer receiver.
//
// It is called from the generated wrapper code.
func panicwrap() {
	pc := getcallerpc()
	name := funcNameForPrint(funcname(findfunc(pc)))
	// name is something like "main.(*T).F".
	// We want to extract pkg ("main"), typ ("T"), and meth ("F").
	// Do it by finding the parens.
	i := bytealg.IndexByteString(name, '(')
	if i < 0 {
		throw("panicwrap: no ( in " + name)
	}
	pkg := name[:i-1]
	if i+2 >= len(name) || name[i-1:i+2] != ".(*" {
		throw("panicwrap: unexpected string after package name: " + name)
	}
	name = name[i+2:]
	i = bytealg.IndexByteString(name, ')')
	if i < 0 {
		throw("panicwrap: no ) in " + name)
	}
	if i+2 >= len(name) || name[i:i+2] != ")." {
		throw("panicwrap: unexpected string after type name: " + name)
	}
	typ := name[:i]
	meth := name[i+2:]
	panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
}
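// Editor's sketch of the parse: for name == "main.(*T).F" the code
// extracts pkg == "main", typ == "T", meth == "F", and the panic
// message reads "value method main.T.F called using nil *T pointer".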
*/

internal/runtime/malloc.go (new file, 339 lines)
@@ -0,0 +1,339 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function.
func newobject(typ *_type) unsafe.Pointer {
	return AllocZ(typ.Size_)
}

/*
//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}

//go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
	return mallocgc(typ.Size_, typ, true)
}
*/

const mathMaxUintptr = ^uintptr(0)

// mathMulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func mathMulUintptr(a, b uintptr) (uintptr, bool) {
	if a|b < 1<<(4*goarchPtrSize) || a == 0 {
		return a * b, false
	}
	overflow := b > mathMaxUintptr/a
	return a * b, overflow
}
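// Editor's sketch of the overflow check in use (this mirrors newarray
// below):
//
//	mem, overflow := mathMulUintptr(elemSize, uintptr(n))
//	if overflow || n < 0 {
//		panic(plainError("runtime: allocation size out of range"))
//	}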

// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n == 1 {
		return AllocZ(typ.Size_)
	}
	mem, overflow := mathMulUintptr(typ.Size_, uintptr(n))
	if overflow || n < 0 {
		panic(plainError("runtime: allocation size out of range"))
	}
	return AllocZ(mem)
}

/*
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}

func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache(mp)
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}

// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, but with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate. In Poisson
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to
		// mcache.next_sample, but next_sample is not used
		// when the rate is 1. So avoid the math below and
		// just return something.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if gp := getg(); gp == gp.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}

// fastexprand returns a random number from an exponential distribution with
// the specified mean.
func fastexprand(mean int) int32 {
	// Avoid overflow. Maximum possible step is
	// -ln(1/(1<<randomBitCount)) * mean, approximately 20 * mean.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-mean*x).
	// The probability distribution function is mean*exp(-mean*x), so the CDF is
	// p = 1 - exp(-mean*x), so
	//	q = 1 - p == exp(-mean*x)
	//	log_e(q) = -mean*x
	//	-log_e(q)/mean = x
	//	x = -log_e(q) * mean
	//	x = log_2(q) * (-log_e(2)) * mean    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrandn(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}
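// Editor's note, a worked instance of the math above: with the default
// MemProfileRate of 512 KiB, a draw of q == 1<<25 gives
// qlog == log2(2^25) - 26 == -1, so the step is 1 * ln(2) * mean,
// about 355 KiB; averaged over all q the step's mean is MemProfileRate.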

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrandn(uint32(2 * rate)))
	}
	return 0
}

type persistentAlloc struct {
	base *notInHeap
	off uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10

// persistentChunks is a list of all the persistent chunks we have
// allocated. The list is maintained through the first word in the
// persistent chunk. This is updated atomically.
var persistentChunks *notInHeap

// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
// sysStat must be non-nil.
//
// Consider marking persistentalloc'd types not in heap by embedding
// runtime/internal/sys.NotInHeap.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}

// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(goarch.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}

// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}

// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then optionally maps that region into the Ready state
// as needed.
//
// The caller is responsible for locking.
type linearAlloc struct {
	next uintptr   // next free byte
	mapped uintptr // one byte past end of mapped space
	end uintptr    // end of reserved space

	mapMemory bool // transition memory from Reserved to Ready if true
}

func (l *linearAlloc) init(base, size uintptr, mapMemory bool) {
	if base+size < base {
		// Chop off the last byte. The runtime isn't prepared
		// to deal with situations where the bounds could overflow.
		// Leave that memory reserved, though, so we don't map it
		// later.
		size -= 1
	}
	l.next, l.mapped = base, base
	l.end = base + size
	l.mapMemory = mapMemory
}

func (l *linearAlloc) alloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	p := alignUp(l.next, align)
	if p+size > l.end {
		return nil
	}
	l.next = p + size
	if pEnd := alignUp(l.next-1, physPageSize); pEnd > l.mapped {
		if l.mapMemory {
			// Transition from Reserved to Prepared to Ready.
			n := pEnd - l.mapped
			sysMap(unsafe.Pointer(l.mapped), n, sysStat)
			sysUsed(unsafe.Pointer(l.mapped), n, n)
		}
		l.mapped = pEnd
	}
	return unsafe.Pointer(p)
}
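// Editor's sketch of linearAlloc in use, under the types above:
//
//	var la linearAlloc
//	la.init(base, 1<<20, true)      // reserve 1 MiB starting at base
//	p := la.alloc(4096, 8, sysStat) // nil once the region is exhausted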

// notInHeap is off-heap memory allocated by a lower-level allocator
// like sysAlloc or persistentAlloc.
//
// In general, it's better to use real types which embed
// runtime/internal/sys.NotInHeap, but this serves as a generic type
// for situations where that isn't possible (like in the allocators).
//
// TODO: Use this as the return type of sysAlloc, persistentAlloc, etc?
type notInHeap struct{ _ sys.NotInHeap }

func (p *notInHeap) add(bytes uintptr) *notInHeap {
	return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// computeRZlog computes the size of the redzone.
// Refer to the implementation of the compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
	switch {
	case userSize <= (64 - 16):
		return 16 << 0
	case userSize <= (128 - 32):
		return 16 << 1
	case userSize <= (512 - 64):
		return 16 << 2
	case userSize <= (4096 - 128):
		return 16 << 3
	case userSize <= (1<<14)-256:
		return 16 << 4
	case userSize <= (1<<15)-512:
		return 16 << 5
	case userSize <= (1<<16)-1024:
		return 16 << 6
	default:
		return 16 << 7
	}
}
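// Editor's sketch of the size-to-redzone mapping:
//
//	computeRZlog(40)   // 16:  40 <= 64-16
//	computeRZlog(90)   // 32:  90 <= 128-32
//	computeRZlog(5000) // 256: 5000 <= (1<<14)-256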
*/
@@ -59,6 +59,12 @@ import (
	"github.com/goplus/llgo/internal/abi"
)

type maptype = abi.MapType

const (
	goarchPtrSize = unsafe.Sizeof(uintptr(0))
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits
@@ -74,8 +80,9 @@ const (
	// Must fit in a uint8.
	// Fast versions cannot handle big elems - the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
-	maxKeySize  = abi.MapMaxKeyBytes
-	maxElemSize = abi.MapMaxElemBytes
+	//
+	// maxKeySize = abi.MapMaxKeyBytes
+	// maxElemSize = abi.MapMaxElemBytes

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
@@ -179,11 +186,12 @@ type hiter struct {
	bucket uintptr
	checkBucket uintptr
}
*/

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
-	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
+	return uintptr(1) << (b & uint8(goarchPtrSize*8-1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
@@ -193,7 +201,7 @@ func bucketMask(b uint8) uintptr {

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
-	top := uint8(hash >> (goarch.PtrSize*8 - 8))
+	top := uint8(hash >> (goarchPtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
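// Editor's sketch: on a 64-bit target goarchPtrSize*8-8 == 56, so
// tophash keeps the hash's top byte:
//
//	tophash(0xAB12_3456_789A_BCDE) // 0xAB (values below minTopHash are offset up)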
@@ -206,16 +214,18 @@ func evacuated(b *bmap) bool {
}

func (b *bmap) overflow(t *maptype) *bmap {
-	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
+	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
+	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarchPtrSize)) = ovf
}

/*
func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}
*/

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
@@ -280,6 +290,7 @@ func (h *hmap) createOverflow() {
	}
}

/*
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
@@ -337,6 +348,7 @@ func makemap(t *maptype, hint int, h *hmap) *hmap {

	return h
}
*/

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
@@ -389,6 +401,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
	return buckets, nextOverflow
}

/*
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
@@ -575,24 +588,13 @@ func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Point
	}
	return e, true
}
*/

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
@@ -694,6 +696,7 @@ done:
	return elem
}

/*
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
@@ -1055,6 +1058,7 @@ func mapclear(t *maptype, h *hmap) {
	}
	h.flags &^= hashWriting
}
*/

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
@@ -1305,6 +1309,7 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	}
}

/*
// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap

internal/runtime/mbarrier.go (new file, 349 lines)
@@ -0,0 +1,349 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/internal/abi"
)

/*
import (
	"internal/abi"
	"internal/goarch"
	"internal/goexperiment"
	"unsafe"
)

// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra-style insertion barrier—which shades the
// object whose reference is being written. The insertion part of the
// barrier is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
//	writePointer(slot, ptr):
//		shade(*slot)
//		if current stack is grey:
//			shade(ptr)
//		*slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
//
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// global during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.
*/

// typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
//go:nosplit
func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) {
	if dst == src {
		return
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.Size_)
}
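// Editor's sketch: callers pass a type descriptor plus raw pointers,
// e.g. copying one map element into a freshly allocated slot:
//
//	dst := newobject(t.Elem)
//	typedmemmove(t.Elem, dst, src)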

/*
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
}

// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
}

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if raceenabled {
		raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
		raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.Size_)
		msanread(src, typ.Size_)
	}
	if asanenabled {
		asanwrite(dst, typ.Size_)
		asanread(src, typ.Size_)
	}
	typedmemmove(typ, dst, src)
}

//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}

// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
	if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
	}
	memmove(dst, src, size)

	// Move pointers returned in registers to a place where the GC can see them.
	for i := range regs.Ints {
		if regs.ReturnIsPtr.Get(i) {
			regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
		}
	}
}

//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
	n := dstLen
	if n > srcLen {
		n = srcLen
	}
	if n == 0 {
		return 0
	}

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstPtr, uintptr(n)*typ.Size_)
		msanread(srcPtr, uintptr(n)*typ.Size_)
	}
	if asanenabled {
		asanwrite(dstPtr, uintptr(n)*typ.Size_)
		asanread(srcPtr, uintptr(n)*typ.Size_)
	}

	if goexperiment.CgoCheck2 {
		cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
	}

	if dstPtr == srcPtr {
		return n
	}

	// Note: No point in checking typ.PtrBytes here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.Size_
	if writeBarrier.needed {
		pwsize := size - typ.Size_ + typ.PtrBytes
		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstPtr, srcPtr, size)
	return n
}

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.PtrBytes == 0 {
		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
	}
	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if writeBarrier.needed && typ.PtrBytes != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
	}
	memclrNoHeapPointers(ptr, typ.Size_)
}

//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}

//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if writeBarrier.needed && typ.PtrBytes != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, size)
	}
	memclrNoHeapPointers(ptr, size)
}

//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
	size := typ.Size_ * uintptr(len)
	if writeBarrier.needed && typ.PtrBytes != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, size)
	}
	memclrNoHeapPointers(ptr, size)
}
*/

// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	bulkBarrierPreWrite(uintptr(ptr), 0, n)
	memclrNoHeapPointers(ptr, n)
}
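// Editor's sketch of choosing between the two clearing paths:
//
//	if typ.PtrBytes != 0 {
//		memclrHasPointers(ptr, typ.Size_)    // write barriers first
//	} else {
//		memclrNoHeapPointers(ptr, typ.Size_) // plain memset
//	}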

internal/runtime/mbitmap.go (new file, 1446 lines; diff omitted: too large to display)

internal/runtime/msize.go (new file, 29 lines)
@@ -0,0 +1,29 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Malloc small size classes.
//
// See malloc.go for overview.
// See also mksizeclasses.go for how we decide what size classes to use.

package runtime

// Returns size of the memory block that mallocgc will allocate if you ask for the size.
func roundupsize(size uintptr) uintptr {
	return size
}

/* if size < _MaxSmallSize {
	if size <= smallSizeMax-8 {
		return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
	} else {
		return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
	}
}
if size+_PageSize < size {
	return size
}
return alignUp(size, _PageSize)
}
*/
@@ -1052,21 +1052,14 @@ func sync_throw(s string) {
func sync_fatal(s string) {
	fatal(s)
}
*/

// throw triggers a fatal error that dumps a stack trace and exits.
//
// throw should be used for runtime-internal fatal errors where Go itself,
// rather than user code, may be at fault for the failure.
//
//go:nosplit
func throw(s string) {
-	// Everything throw does should be recursively nosplit so it
-	// can be called even when it's unsafe to grow the stack.
-	systemstack(func() {
-		print("fatal error: ", s, "\n")
-	})
-
-	fatalthrow(throwTypeRuntime)
+	fatal(s)
}

// fatal triggers a fatal error that dumps a stack trace and exits.
@@ -1076,10 +1069,11 @@ func throw(s string) {
//
// fatal does not include runtime frames, system goroutines, or frame metadata
// (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
//
//go:nosplit
func fatal(s string) {
-	// Everything fatal does should be recursively nosplit so it
+	panic("fatal error: " + s)
}
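// Editor's sketch of the new behavior: fatal now simply panics with the
// prefixed message, so
//
//	fatal("concurrent map writes")
//
// surfaces as: panic: fatal error: concurrent map writes.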
/* // Everything throw does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
	print("fatal error: ", s, "\n")
@@ -1088,6 +1082,7 @@ func fatal(s string) {
	fatalthrow(throwTypeUser)
}

/*
// runningPanicDefers is non-zero while running deferred functions for panic.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers atomic.Uint32
@@ -1139,17 +1134,7 @@ func recovery(gp *g) {
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow(t throwType) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()

	if gp.m.throwing == throwTypeNone {
		gp.m.throwing = t
	}

	// Switch to the system stack to avoid any stack growth, which may make
	// things worse if the runtime is in a bad state.
	systemstack(func() {
@@ -1172,6 +1157,7 @@ func fatalthrow(t throwType) {
	*(*int)(nil) = 0 // not reached
}

/*
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
@@ -4,27 +4,148 @@

package runtime

-import _ "unsafe"
+import (
+	"unsafe"
+
+	"github.com/goplus/llgo/c"
+)

// Should be a built-in for unsafe.Pointer?
//
//go:linkname add llgo.advance
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer
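// Editor's sketch: add is plain pointer arithmetic, so stepping over a
// uint64-sized field looks like
//
//	q := add(p, unsafe.Sizeof(uint64(0))) // q == p advanced by 8 bytes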
/*
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
// Otherwise, systemstack is being called from the limited stack
// of an ordinary goroutine. In this case, systemstack switches
// to the per-OS-thread stack, calls fn, and switches back.
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
//go:noescape
func systemstack(fn func())

//go:nosplit
//go:nowritebarrierrec
func badsystemstack() {
	writeErrStr("fatal: systemstack called from unexpected goroutine")
}
*/

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
// is a multiple of the pointer size, then any pointer-aligned,
// pointer-sized portion is cleared atomically. Despite the function
// name, this is necessary because this function is the underlying
// implementation of typedmemclr and memclrHasPointers. See the doc of
// memmove for more details.
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	c.Memset(ptr, 0, n)
}

// Zeroinit initializes memory to zero.
func Zeroinit(p unsafe.Pointer, size uintptr) unsafe.Pointer {
	return c.Memset(p, 0, size)
}
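// Editor's sketch (assumes c.Malloc from the llgo c package): pairing a
// raw allocation with Zeroinit yields zeroed, typed storage:
//
//	type pair struct{ a, b int }
//	p := (*pair)(Zeroinit(c.Malloc(unsafe.Sizeof(pair{})), unsafe.Sizeof(pair{})))
//	// p.a == 0 && p.b == 0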

/*
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}
*/

// memmove copies n bytes from "from" to "to".
//
// memmove ensures that any pointer in "from" is written to "to" with
// an indivisible write, so that racy reads cannot observe a
// half-written pointer. This is necessary to prevent the garbage
// collector from observing invalid pointers, and differs from memmove
// in unmanaged languages. However, memmove is only required to do
// this if "from" and "to" may contain pointers, which can only be the
// case if "from", "to", and "n" are all word-aligned.
//
//go:linkname memmove C.memmove
func memmove(to, from unsafe.Pointer, n uintptr)

/*
// Outside assembly calls memmove. Make sure it has ABI wrappers.
//
//go:linkname memmove

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

// exported value for testing
const hashLoad = float32(loadFactorNum) / float32(loadFactorDen)
*/

//go:linkname fastrand C.rand
func fastrand() uint32

-/* TODO(xsw):
-func fastrand() uint32 {
+/*
//go:nosplit
func fastrandn(n uint32) uint32 {
	// This is similar to fastrand() % n, but faster.
	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32(uint64(fastrand()) * uint64(n) >> 32)
}
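// Editor's note on the reduction above: (uint64(r)*uint64(n))>>32 maps
// a uniform 32-bit r into [0, n) without a modulo. For example, with
// r == 1<<31 (half the 32-bit range) and n == 10, the result is
// (2^31 * 10) >> 32 == 5, i.e. the midpoint of [0, 10).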
func fastrand64() uint64 {
	mp := getg().m
	// Implement wyrand: https://github.com/wangyi-fudan/wyhash
	// Only the platform that math.Mul64 can be lowered
	// by the compiler should be in this list.
	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
-		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
+		goarch.IsS390x|goarch.IsRiscv64 == 1 {
		mp.fastrand += 0xa0761d6478bd642f
		hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db)
-		return uint32(hi ^ lo)
+		return hi ^ lo
	}

	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
@@ -32,7 +153,326 @@ func fastrand() uint32 {
	s1, s0 := t[0], t[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	r := uint64(s0 + s1)

	s0, s1 = s1, s0
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	r += uint64(s0+s1) << 32

	t[0], t[1] = s0, s1
-	return s0 + s1
+	return r
}

func fastrandu() uint {
	if goarch.PtrSize == 4 {
		return uint(fastrand())
	}
	return uint(fastrand64())
}

//go:linkname rand_fastrand64 math/rand.fastrand64
func rand_fastrand64() uint64 { return fastrand64() }

//go:linkname sync_fastrandn sync.fastrandn
func sync_fastrandn(n uint32) uint32 { return fastrandn(n) }

//go:linkname net_fastrandu net.fastrandu
func net_fastrandu() uint { return fastrandu() }

//go:linkname os_fastrand os.fastrand
func os_fastrand() uint32 { return fastrand() }

// in internal/bytealg/equal_*.s
//
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
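// Editor's sketch: noescape lets a caller hand a stack address to an
// assembly-backed routine without forcing the variable to escape:
//
//	var a, b [8]byte
//	eq := memequal(noescape(unsafe.Pointer(&a)), noescape(unsafe.Pointer(&b)), 8)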
// noEscapePtr hides a pointer from escape analysis. See noescape.
|
||||
// USE CAREFULLY!
|
||||
//
|
||||
//go:nosplit
|
||||
func noEscapePtr[T any](p *T) *T {
|
||||
x := uintptr(unsafe.Pointer(p))
|
||||
return (*T)(unsafe.Pointer(x ^ 0))
|
||||
}

// Not all cgocallback frames are actually cgocallback,
// so not all have these arguments. Mark them uintptr so that the GC
// does not misinterpret memory when the arguments are not present.
// cgocallback is not called from Go, only from crosscall2.
// This in turn calls cgocallbackg, which is where we'll find
// pointer-declared arguments.
//
// When fn is nil (frame is saved g), call dropm instead;
// this is used when the C thread is exiting.
func cgocallback(fn, frame, ctxt uintptr)

func gogo(buf *gobuf)

func asminit()
func setg(gg *g)
func breakpoint()

// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
// frameSize, and regArgs.
//
// Arguments passed on the stack and space for return values passed on the stack
// must be laid out at the space pointed to by stackArgs (with total length
// stackArgsSize) according to the ABI.
//
// stackRetOffset must be some value <= stackArgsSize that indicates the
// offset within stackArgs where the return value space begins.
//
// frameSize is the total size of the argument frame at stackArgs and must
// therefore be >= stackArgsSize. It must include additional space for spilling
// register arguments for stack growth and preemption.
//
// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
// since frameSize will be redundant with stackArgsSize.
//
// Arguments passed in registers must be laid out in regArgs according to the ABI.
// regArgs will hold any return values passed in registers after the call.
//
// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
// then copies stackArgsSize-stackRetOffset bytes back to the return space
// in stackArgs once fn has completed. It also "unspills" argument registers from
// regArgs before calling fn, and spills them back into regArgs immediately
// following the call to fn. If there are results being returned on the stack,
// the caller should pass the argument frame type as stackArgsType so that
// reflectcall can execute appropriate write barriers during the copy.
//
// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
// registers on the return path will contain Go pointers. It will then store
// these pointers in regArgs.Ptrs such that they are visible to the GC.
//
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in callbackWrap in syscall_windows.go, and it
// does NOT pass a frame type, meaning there are no write barriers invoked. See that
// call site for justification.
//
// Package reflect accesses this symbol through a linkname.
//
// Arguments passed through to reflectcall do not escape. The type is used
// only in a very limited callee of reflectcall, the stackArgs are copied, and
// regArgs is only used in the reflectcall frame.
//
//go:noescape
func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
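
Outside the runtime, the higher-level equivalent is an atomic pointer
store; a minimal sketch of the initialize-then-publish pattern in
ordinary Go:

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct{ v int }

var shared atomic.Pointer[node]

func publish() {
	n := &node{v: 42} // initialization writes
	shared.Store(n)   // the atomic store is the publication write
}

func main() {
	publish()
	if n := shared.Load(); n != nil {
		fmt.Println(n.v) // a reader never observes a partially built node
	}
}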

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// The implementation may be a compiler intrinsic; there is not
// necessarily code implementing this on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp()
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc() uintptr

//go:noescape
func getcallersp() uintptr // implemented as an intrinsic on all platforms

// getclosureptr returns the pointer to the current closure.
// getclosureptr can only be used in an assignment statement
// at the entry of a function. Moreover, the go:nosplit directive
// must be specified at the declaration of the caller function,
// so that the function prolog does not clobber the closure register.
// For example:
//
//	//go:nosplit
//	func f(arg1, arg2, arg3 int) {
//		dx := getclosureptr()
//	}
//
// The compiler rewrites calls to this function into instructions that fetch the
// pointer from a well-known register (DX on x86 architecture, etc.) directly.
func getclosureptr() uintptr

//go:noescape
func asmcgocall(fn, arg unsafe.Pointer) int32

func morestack()
func morestack_noctxt()
func rt0_go()

// return0 is a stub used to return 0 from deferproc.
// It is called at the very end of deferproc to signal
// the calling Go function that it should not jump
// to deferreturn.
// in asm_*.s
func return0()

// in asm_*.s
// not called directly; definitions here supply type information for traceback.
// These must have the same signature (arg pointer map) as reflectcall.
func call16(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call64(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call128(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call256(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call512(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1024(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2048(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4096(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8192(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16384(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call32768(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call65536(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call131072(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call262144(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call524288(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1048576(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call2097152(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call4194304(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call8388608(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call16777216(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call33554432(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call67108864(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call134217728(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call268435456(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call536870912(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func call1073741824(typ, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)

func systemstack_switch()

// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// alignDown rounds n down to a multiple of a. a must be a power of 2.
func alignDown(n, a uintptr) uintptr {
	return n &^ (a - 1)
}

// divRoundUp returns ceil(n / a).
func divRoundUp(n, a uintptr) uintptr {
	// a is generally a power of two. This will get inlined and
	// the compiler will optimize the division.
	return (n + a - 1) / a
}
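
Worked examples for the three helpers, with a a power of two (the wrapper
function is ours, for illustration only):

func alignExamples() {
	println(alignUp(13, 8))    // 16: (13+7) &^ 7
	println(alignDown(13, 8))  // 8:  13 &^ 7
	println(divRoundUp(13, 8)) // 2:  (13+7)/8
}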

// checkASM reports whether assembly runtime checks have passed.
func checkASM() bool

func memequal_varlen(a, b unsafe.Pointer) bool

// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	// Avoid branches. In the SSA compiler, this compiles to
	// exactly what you would want it to.
	return int(uint8(*(*uint8)(unsafe.Pointer(&x))))
}
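
For comparison, a naive version (ours, for illustration) that may compile
to a branch or conditional move, which the one-byte reinterpretation above
avoids:

func bool2intBranchy(x bool) int {
	if x {
		return 1
	}
	return 0
}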

// abort crashes the runtime in situations where even throw might not
// work. In general it should do something a debugger will recognize
// (e.g., an INT3 on x86). A crash in abort is recognized by the
// signal handler, which will attempt to tear down the runtime
// immediately.
func abort()

// Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrier1()
func gcWriteBarrier2()
func gcWriteBarrier3()
func gcWriteBarrier4()
func gcWriteBarrier5()
func gcWriteBarrier6()
func gcWriteBarrier7()
func gcWriteBarrier8()
func duffzero()
func duffcopy()

// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addmoduledata()

// Injected by the signal handler for panicking signals.
// Initializes any registers that have fixed meaning at calls but
// are scratch in bodies and calls sigpanic.
// On many platforms it just jumps to sigpanic.
func sigpanic0()

// intArgRegs is used by the various register assignment
// algorithm implementations in the runtime. These include:
// - Finalizers (mfinal.go)
// - Windows callbacks (syscall_windows.go)
//
// Both are stripped-down versions of the algorithm since they
// only have to deal with a subset of cases (finalizers only
// take a pointer or interface argument, Go Windows callbacks
// don't support floating point).
//
// It should be modified with care and is generally only
// modified when testing this package.
//
// It should never be set higher than its internal/abi
// constant counterparts, because the system relies on a
// structure that is at least large enough to hold the
// registers the system supports.
//
// Protected by finlock.
var intArgRegs = abi.IntArgRegs
*/

@@ -6,13 +6,13 @@

package runtime

/*
import (
	"github.com/goplus/llgo/internal/abi"
)

type _type = abi.Type

/*
type maptype = abi.MapType

type arraytype = abi.ArrayType

@@ -23,6 +23,8 @@ import (
	"github.com/goplus/llgo/internal/runtime/c"
)

type _type = abi.Type

type eface struct {
	_type *_type
	data  unsafe.Pointer

@@ -16,6 +16,12 @@

package runtime

import (
	"unsafe"

	"github.com/goplus/llgo/internal/abi"
)

// Map represents a Go map.
type Map = hmap

@@ -23,3 +29,8 @@ type Map = hmap
func MakeSmallMap() *Map {
	return makemap_small()
}

// Mapassign finds a key in map m and returns the elem address to assign.
func Mapassign(t *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
	return mapassign(t, m, key)
}
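
A hypothetical caller, showing the sequence a compiler would emit for
m[k] = v; the function name and the int64 element type are illustrative:

func storeInt64(mapType *abi.MapType, m *Map, k, v int64) {
	p := Mapassign(mapType, m, unsafe.Pointer(&k)) // address of the elem slot for k
	*(*int64)(p) = v                               // write the value into the map
}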

@@ -93,10 +93,3 @@ func stringTracef(fp c.FilePtr, format *c.Char, s String) {
}

// -----------------------------------------------------------------------------

// Zeroinit initializes memory to zero.
func Zeroinit(p unsafe.Pointer, size uintptr) unsafe.Pointer {
	return c.Memset(p, 0, size)
}
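
A usage sketch (the wrapper is ours): Zeroinit works on any writable
block, heap or stack, since it is a thin wrapper over c.Memset:

func zeroExample() {
	var buf [32]byte
	buf[0] = 0xff
	Zeroinit(unsafe.Pointer(&buf[0]), unsafe.Sizeof(buf)) // buf is all zero again
}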

// -----------------------------------------------------------------------------

@@ -365,3 +365,50 @@ func (b Builder) MapUpdate(m, k, v Expr) {
}

// -----------------------------------------------------------------------------

// The Range instruction yields an iterator over the domain and range
// of X, which must be a string or map.
//
// Elements are accessed via Next.
//
// Type() returns an opaque and degenerate "rangeIter" type.
//
// Pos() returns the ast.RangeStmt.For.
//
// Example printed form:
//
//	t0 = range "hello":string
func (b Builder) Range(x Expr) Expr {
	switch x.kind {
	case vkString:
		return b.InlineCall(b.Pkg.rtFunc("NewStringIter"), x)
	}
	panic("todo")
}

// The Next instruction reads and advances the (map or string)
// iterator Iter and returns a 3-tuple value (ok, k, v). If the
// iterator is not exhausted, ok is true and k and v are the next
// elements of the domain and range, respectively. Otherwise ok is
// false and k and v are undefined.
//
// Components of the tuple are accessed using Extract.
//
// The IsString field distinguishes iterators over strings from those
// over maps, as the Type() alone is insufficient: consider
// map[int]rune.
//
// Type() returns a *types.Tuple for the triple (ok, k, v).
// The types of k and/or v may be types.Invalid.
//
// Example printed form:
//
//	t1 = next t0
func (b Builder) Next(iter Expr, isString bool) (ret Expr) {
	if isString {
		return b.InlineCall(b.Pkg.rtFunc("StringIterNext"), iter)
	}
	panic("todo")
}
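
A hedged sketch of how a frontend might drive Range/Next when lowering
`for i, r := range s` over a string; Extract is assumed to behave like
go/ssa's Extract and is not defined in this diff:

func lowerStringRange(b Builder, s Expr) {
	iter := b.Range(s)        // t0 = range s
	tup := b.Next(iter, true) // t1 = next t0, the (ok, k, v) tuple
	ok := b.Extract(tup, 0)   // loop condition (assumed helper)
	i := b.Extract(tup, 1)    // byte index of the rune
	r := b.Extract(tup, 2)    // the rune value
	_, _, _ = ok, i, r        // branch/loop wiring omitted
}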

// -----------------------------------------------------------------------------

47
ssa/expr.go
47
ssa/expr.go
@@ -669,53 +669,6 @@ func castPtr(b llvm.Builder, x llvm.Value, t llvm.Type) llvm.Value {

// -----------------------------------------------------------------------------

// The Range instruction yields an iterator over the domain and range
// of X, which must be a string or map.
//
// Elements are accessed via Next.
//
// Type() returns an opaque and degenerate "rangeIter" type.
//
// Pos() returns the ast.RangeStmt.For.
//
// Example printed form:
//
//	t0 = range "hello":string
func (b Builder) Range(x Expr) Expr {
	switch x.kind {
	case vkString:
		return b.InlineCall(b.Pkg.rtFunc("NewStringIter"), x)
	}
	panic("todo")
}

// The Next instruction reads and advances the (map or string)
// iterator Iter and returns a 3-tuple value (ok, k, v). If the
// iterator is not exhausted, ok is true and k and v are the next
// elements of the domain and range, respectively. Otherwise ok is
// false and k and v are undefined.
//
// Components of the tuple are accessed using Extract.
//
// The IsString field distinguishes iterators over strings from those
// over maps, as the Type() alone is insufficient: consider
// map[int]rune.
//
// Type() returns a *types.Tuple for the triple (ok, k, v).
// The types of k and/or v may be types.Invalid.
//
// Example printed form:
//
//	t1 = next t0
func (b Builder) Next(iter Expr, isString bool) (ret Expr) {
	if isString {
		return b.InlineCall(b.Pkg.rtFunc("StringIterNext"), iter)
	}
	panic("todo")
}

// -----------------------------------------------------------------------------

// The MakeClosure instruction yields a closure value whose code is
// Fn and whose free variables' values are supplied by Bindings.
//