Merge pull request #381 from xushiwei/q

patch reflect: ValueOf/Int
xushiwei
2024-06-21 00:42:12 +08:00
committed by GitHub
3 changed files with 480 additions and 4 deletions

View File

@@ -8,4 +8,7 @@ func main() {
	v := reflect.Zero(tyIntSlice)
	println(v.Len()) // 0: the zero value of []int is a nil slice
	v = reflect.ValueOf(100)
	println(v.Int()) // 100: exercises the new ValueOf/Int path
}

View File

@@ -101,6 +101,83 @@ func (f flag) ro() flag {
return 0
}
func (v Value) typ() *abi.Type {
// Types are either static (for compiler-created types) or
// heap-allocated but always reachable (for reflection-created
// types, held in the central map). So there is no need to
// escape types. noescape here helps avoid unnecessary escape
// of v.
return (*abi.Type)(unsafe.Pointer(v.typ_))
}
// pointer returns the underlying pointer represented by v.
// v.Kind() must be Pointer, Map, Chan, Func, or UnsafePointer
// if v.Kind() == Pointer, the base type must not be not-in-heap.
func (v Value) pointer() unsafe.Pointer {
/*
if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
return *(*unsafe.Pointer)(v.ptr)
}
return v.ptr
*/
panic("todo")
}
// packEface converts v to the empty interface.
func packEface(v Value) any {
t := v.typ()
var i any
e := (*emptyInterface)(unsafe.Pointer(&i))
// First, fill in the data portion of the interface.
switch {
case t.IfaceIndir():
if v.flag&flagIndir == 0 {
panic("bad indir")
}
// Value is indirect, and so is the interface we're making.
ptr := v.ptr
if v.flag&flagAddr != 0 {
// TODO: pass safe boolean from valueInterface so
// we don't need to copy if safe==true?
c := unsafe_New(t)
typedmemmove(t, c, ptr)
ptr = c
}
e.word = ptr
case v.flag&flagIndir != 0:
// Value is indirect, but interface is direct. We need
// to load the data at v.ptr into the interface data word.
e.word = *(*unsafe.Pointer)(v.ptr)
default:
// Value is direct, and so is the interface.
e.word = v.ptr
}
// Now, fill in the type portion. We're very careful here not
// to have any operation between the e.word and e.typ assignments
// that would let the garbage collector observe the partially-built
// interface value.
e.typ = t
return i
}
// unpackEface converts the empty interface i to a Value.
func unpackEface(i any) Value {
e := (*emptyInterface)(unsafe.Pointer(&i))
// NOTE: don't read e.word until we know whether it is really a pointer or not.
t := e.typ
if t == nil {
return Value{}
}
f := flag(t.Kind())
if t.IfaceIndir() {
f |= flagIndir
}
return Value{t, e.word, f}
}
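
For orientation, here is a sketch of the emptyInterface layout that packEface and unpackEface cast through. The field names follow the upstream reflect source; whether the data word holds a small scalar inline is an assumption about llgo's representation (gc would box the 100 and store its address):

package main

import "unsafe"

// emptyInterface mirrors the runtime layout of an interface{} value:
// one word identifying the dynamic type, one word of data.
type emptyInterface struct {
	typ  unsafe.Pointer // *abi.Type in the real code
	word unsafe.Pointer // the value itself, or a pointer to it
}

func main() {
	var i any = 100
	e := (*emptyInterface)(unsafe.Pointer(&i))
	// t.IfaceIndir() decides whether word holds the value inline or
	// points at a copy; llgo appears to inline word-sized scalars,
	// which is what the Int() changes below rely on.
	println(e.typ != nil, e.word != nil)
}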
// A ValueError occurs when a Value method is invoked on
// a Value that does not support it. Such cases are documented
// in the description of each method.
@@ -227,14 +304,17 @@ func (v Value) Int() int64 {
	p := v.ptr
	switch k {
	case Int:
-		return int64(*(*int)(p))
+		return int64(uintptr(p))
	case Int8:
-		return int64(*(*int8)(p))
+		return int64(uintptr(p))
	case Int16:
-		return int64(*(*int16)(p))
+		return int64(uintptr(p))
	case Int32:
-		return int64(*(*int32)(p))
+		return int64(uintptr(p))
	case Int64:
+		if unsafe.Sizeof(uintptr(0)) == 8 {
+			return int64(uintptr(p))
+		}
		return *(*int64)(p)
	}
	panic(&ValueError{"reflect.Value.Int", v.kind()})
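
The rewritten cases encode llgo's value representation: for the word-sized integer kinds, v.ptr is not a pointer to the integer but the integer itself, carried in the interface data word, so Int() recovers it by reinterpreting the word (this presumably requires negative values to be stored sign-extended). Only Int64 on 32-bit targets still needs the indirect load. A minimal sketch of the two decoding styles, outside reflect:

package main

import "unsafe"

func main() {
	// Upstream decoding: p points at the integer.
	x := 100
	p := unsafe.Pointer(&x)
	println(int64(*(*int)(p))) // 100

	// llgo-style decoding: the word itself encodes the integer.
	q := unsafe.Pointer(uintptr(100)) // stand-in for a data word holding 100
	println(int64(uintptr(q)))        // 100
}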
@@ -520,6 +600,16 @@ func unsafe_New(*abi.Type) unsafe.Pointer
//go:linkname unsafe_NewArray github.com/goplus/llgo/internal/runtime.NewArray
func unsafe_NewArray(*abi.Type, int) unsafe.Pointer
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero Value.
func ValueOf(i any) Value {
if i == nil {
return Value{}
}
return unpackEface(i)
}
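
A usage sketch matching the test at the top of this PR (it assumes IsValid and Kind behave as in upstream reflect, which this port may not fully implement yet):

package main

import "reflect"

func main() {
	v := reflect.ValueOf(nil)
	println(v.IsValid()) // false: ValueOf(nil) yields the zero Value
	v = reflect.ValueOf(100)
	println(v.Kind() == reflect.Int) // true
	println(v.Int())                 // 100
}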
// Zero returns a Value representing the zero value for the specified type.
// The result is different from the zero value of the Value struct,
// which represents no value at all.
@@ -546,3 +636,46 @@ func Zero(typ Type) Value {
// TODO(xsw): check this
// must match declarations in runtime/map.go.
const maxZero = runtime.MaxZero
// memmove copies size bytes to dst from src. No write barriers are used.
//
//go:linkname memmove C.memmove
func memmove(dst, src unsafe.Pointer, size uintptr)
// typedmemmove copies a value of type t to dst from src.
//
//go:linkname typedmemmove github.com/goplus/llgo/internal/runtime.Typedmemmove
func typedmemmove(t *abi.Type, dst, src unsafe.Pointer)
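
packEface uses typedmemmove to snapshot an addressable value into a fresh allocation. As a rough plain-Go picture of such a typed copy, ignoring write barriers entirely (rawCopy is a made-up helper, not part of the PR):

package main

import "unsafe"

// rawCopy copies size bytes from src to dst, a barrier-free stand-in
// for typedmemmove when the element type is known only by its size.
func rawCopy(dst, src unsafe.Pointer, size uintptr) {
	copy(unsafe.Slice((*byte)(dst), size), unsafe.Slice((*byte)(src), size))
}

func main() {
	a, b := int64(42), int64(0)
	rawCopy(unsafe.Pointer(&b), unsafe.Pointer(&a), unsafe.Sizeof(a))
	println(b) // 42
}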
/* TODO(xsw):
// typedmemclr zeros the value at ptr of type t.
//
//go:noescape
func typedmemclr(t *abi.Type, ptr unsafe.Pointer)
// typedmemclrpartial is like typedmemclr but assumes that
// dst points off bytes into the value and only clears size bytes.
//
//go:noescape
func typedmemclrpartial(t *abi.Type, ptr unsafe.Pointer, off, size uintptr)
// typedslicecopy copies a slice of elemType values from src to dst,
// returning the number of elements copied.
//
//go:noescape
func typedslicecopy(t *abi.Type, dst, src unsafeheaderSlice) int
// typedarrayclear zeroes the value at ptr of an array of elemType,
// only clears len elem.
//
//go:noescape
func typedarrayclear(elemType *abi.Type, ptr unsafe.Pointer, len int)
//go:noescape
func typehash(t *abi.Type, p unsafe.Pointer, h uintptr) uintptr
func verifyNotInHeapPtr(p uintptr) bool
//go:noescape
func growslice(t *abi.Type, old unsafeheaderSlice, num int) unsafeheaderSlice
*/

View File

@@ -0,0 +1,340 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.
package runtime
import (
"unsafe"
"github.com/goplus/llgo/c"
)
// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra insertion barrier—which shades the object
// whose reference is being written. The insertion part of the barrier
// is necessary while the calling goroutine's stack is grey. In
// pseudocode, the barrier is:
//
// writePointer(slot, ptr):
//     shade(*slot)
//     if current stack is grey:
//         shade(ptr)
//     *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
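
As a schematic Go rendering of the writePointer pseudocode above (shade and currentStackIsGrey are hypothetical stubs; the real barrier is emitted by the compiler and implemented in assembly):

package main

import "unsafe"

func shade(p unsafe.Pointer)   {}              // hypothetical: grey p's object if it is white
func currentStackIsGrey() bool { return true } // hypothetical stack-scan state

// writePointer applies the hybrid barrier: Yuasa deletion barrier on
// the old value, Dijkstra insertion barrier on the new value while
// the stack is grey, then the actual store.
func writePointer(slot *unsafe.Pointer, ptr unsafe.Pointer) {
	shade(*slot)
	if currentStackIsGrey() {
		shade(ptr)
	}
	*slot = ptr
}

func main() {
	var slot unsafe.Pointer
	x := 1
	writePointer(&slot, unsafe.Pointer(&x))
	println(slot != nil) // true
}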
//
//
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
//
//	Mutator thread        GC thread
//	st [slot], ptr        st [slotmark], 1
//
//	ld r1, [slotmark]     ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
//
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.
// Typedmemmove copies a value of type typ to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
func Typedmemmove(typ *Type, dst, src unsafe.Pointer) {
if dst == src {
return
}
// There's a race here: if some other goroutine can write to
// src, it may change some pointer in src after we've
// performed the write barrier but before we perform the
// memory copy. This is safe because the write performed by that
// other goroutine must also be accompanied by a write
// barrier, so at worst we've unnecessarily greyed the old
// pointer that was in src.
c.Memmove(dst, src, typ.Size_)
}
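
Compared with upstream typedmemmove, the port keeps the dst == src early return and the race comment but drops the bulkBarrierPreWrite call that upstream issues before the copy, presumably because llgo's collector does not depend on Go's write barriers; the whole operation reduces to c.Memmove.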
/*
// wbZero performs the write barrier operations necessary before
// zeroing a region of memory at address dst of type typ.
// Does not actually do the zeroing.
//
//go:nowritebarrierrec
//go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer) {
bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
}
// wbMove performs the write barrier operations necessary before
// copying a region of memory from src to dst of type typ.
// Does not actually do the copying.
//
//go:nowritebarrierrec
//go:nosplit
func wbMove(typ *_type, dst, src unsafe.Pointer) {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
}
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.Size_)
msanread(src, typ.Size_)
}
if asanenabled {
asanwrite(dst, typ.Size_)
asanread(src, typ.Size_)
}
typedmemmove(typ, dst, src)
}
//go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
reflect_typedmemmove(typ, dst, src)
}
// reflectcallmove is invoked by reflectcall to copy the return values
// out of the stack and into the heap, invoking the necessary write
// barriers. dst, src, and size describe the return value area to
// copy. typ describes the entire frame (not just the return values).
// typ may be nil, which indicates write barriers are not needed.
//
// It must be nosplit and must only call nosplit functions because the
// stack map of reflectcall is wrong.
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
// Move pointers returned in registers to a place where the GC can see them.
for i := range regs.Ints {
if regs.ReturnIsPtr.Get(i) {
regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
}
}
}
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
n := dstLen
if n > srcLen {
n = srcLen
}
if n == 0 {
return 0
}
// The compiler emits calls to typedslicecopy before
// instrumentation runs, so unlike the other copying and
// assignment operations, it's not instrumented in the calling
// code and needs its own instrumentation.
if raceenabled {
callerpc := getcallerpc()
pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
}
if msanenabled {
msanwrite(dstPtr, uintptr(n)*typ.Size_)
msanread(srcPtr, uintptr(n)*typ.Size_)
}
if asanenabled {
asanwrite(dstPtr, uintptr(n)*typ.Size_)
asanread(srcPtr, uintptr(n)*typ.Size_)
}
if goexperiment.CgoCheck2 {
cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
}
if dstPtr == srcPtr {
return n
}
// Note: No point in checking typ.PtrBytes here:
// compiler only emits calls to typedslicecopy for types with pointers,
// and growslice and reflect_typedslicecopy check for pointers
// before calling typedslicecopy.
size := uintptr(n) * typ.Size_
if writeBarrier.needed {
pwsize := size - typ.Size_ + typ.PtrBytes
bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
}
// See typedmemmove for a discussion of the race between the
// barrier and memmove.
memmove(dstPtr, srcPtr, size)
return n
}
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
if elemType.PtrBytes == 0 {
return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
}
return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
}
// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
// TODO: A "go:nosplitrec" annotation would be perfect for this.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
}
memclrNoHeapPointers(ptr, typ.Size_)
}
//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
typedmemclr(typ, ptr)
}
//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, size)
}
memclrNoHeapPointers(ptr, size)
}
//go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
size := typ.Size_ * uintptr(len)
if writeBarrier.needed && typ.PtrBytes != 0 {
bulkBarrierPreWrite(uintptr(ptr), 0, size)
}
memclrNoHeapPointers(ptr, size)
}
// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.PtrBytes. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
bulkBarrierPreWrite(uintptr(ptr), 0, n)
memclrNoHeapPointers(ptr, n)
}
*/