Merge pull request #382 from xushiwei/q

patch reflect: Append/Index; Int fix

_demo/reflect/reflect.go (new file, 13 lines)
@@ -0,0 +1,13 @@
+package main
+
+import "reflect"
+
+func main() {
+	tyIntSlice := reflect.SliceOf(reflect.TypeOf(0))
+	v := reflect.Zero(tyIntSlice)
+	v = reflect.Append(v, reflect.ValueOf(1), reflect.ValueOf(2), reflect.ValueOf(3))
+	for i, n := 0, v.Len(); i < n; i++ {
+		item := v.Index(i)
+		println(item.Int())
+	}
+}
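
If the new Append, Index, and Int paths behave as intended, the demo should print each element on its own line:

    1
    2
    3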
@@ -440,21 +440,21 @@ func (t *Type) Len() int {
 // Elem returns the element type for t if t is an array, channel, map, pointer, or slice, otherwise nil.
 func (t *Type) Elem() *Type {
 	switch t.Kind() {
-	case Array:
-		tt := (*ArrayType)(unsafe.Pointer(t))
-		return tt.Elem
-	case Chan:
-		tt := (*ChanType)(unsafe.Pointer(t))
-		return tt.Elem
-	case Map:
-		tt := (*MapType)(unsafe.Pointer(t))
-		return tt.Elem
 	case Pointer:
 		tt := (*PtrType)(unsafe.Pointer(t))
 		return tt.Elem
 	case Slice:
 		tt := (*SliceType)(unsafe.Pointer(t))
 		return tt.Elem
+	case Map:
+		tt := (*MapType)(unsafe.Pointer(t))
+		return tt.Elem
+	case Array:
+		tt := (*ArrayType)(unsafe.Pointer(t))
+		return tt.Elem
+	case Chan:
+		tt := (*ChanType)(unsafe.Pointer(t))
+		return tt.Elem
 	}
 	return nil
 }
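
The reorder above is behavior-preserving; it just moves the Pointer and Slice cases to the front, presumably because they are the hot paths for the new Append/Index code. A quick check of the unchanged contract, via the public API:

    package main

    import "reflect"

    func main() {
    	// Elem on a slice type must keep returning the element type,
    	// whatever the case order inside the switch.
    	ty := reflect.SliceOf(reflect.TypeOf(0)) // []int
    	println(ty.Elem().Kind() == reflect.Int) // true
    }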

internal/lib/reflect/makefunc.go (new file, 182 lines)
@@ -0,0 +1,182 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MakeFunc implementation.
+
+package reflect
+
+/*
+import (
+	"unsafe"
+)
+
+// makeFuncImpl is the closure value implementing the function
+// returned by MakeFunc.
+// The first three words of this type must be kept in sync with
+// methodValue and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type makeFuncImpl struct {
+	makeFuncCtxt
+	ftyp *funcType
+	fn   func([]Value) []Value
+}
+
+// MakeFunc returns a new function of the given Type
+// that wraps the function fn. When called, that new function
+// does the following:
+//
+//   - converts its arguments to a slice of Values.
+//   - runs results := fn(args).
+//   - returns the results as a slice of Values, one per formal result.
+//
+// The implementation fn can assume that the argument Value slice
+// has the number and type of arguments given by typ.
+// If typ describes a variadic function, the final Value is itself
+// a slice representing the variadic arguments, as in the
+// body of a variadic function. The result Value slice returned by fn
+// must have the number and type of results given by typ.
+//
+// The Value.Call method allows the caller to invoke a typed function
+// in terms of Values; in contrast, MakeFunc allows the caller to implement
+// a typed function in terms of Values.
+//
+// The Examples section of the documentation includes an illustration
+// of how to use MakeFunc to build a swap function for different types.
+func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value {
+	if typ.Kind() != Func {
+		panic("reflect: call of MakeFunc with non-Func type")
+	}
+
+	t := typ.common()
+	ftyp := (*funcType)(unsafe.Pointer(t))
+
+	code := abi.FuncPCABI0(makeFuncStub)
+
+	// makeFuncImpl contains a stack map for use by the runtime
+	_, _, abid := funcLayout(ftyp, nil)
+
+	impl := &makeFuncImpl{
+		makeFuncCtxt: makeFuncCtxt{
+			fn:      code,
+			stack:   abid.stackPtrs,
+			argLen:  abid.stackCallArgsSize,
+			regPtrs: abid.inRegPtrs,
+		},
+		ftyp: ftyp,
+		fn:   fn,
+	}
+
+	return Value{t, unsafe.Pointer(impl), flag(Func)}
+}
+
+// makeFuncStub is an assembly function that is the code half of
+// the function returned from MakeFunc. It expects a *callReflectFunc
+// as its context register, and its job is to invoke callReflect(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func makeFuncStub()
+
+// The first 3 words of this type must be kept in sync with
+// makeFuncImpl and runtime.reflectMethodValue.
+// Any changes should be reflected in all three.
+type methodValue struct {
+	makeFuncCtxt
+	method int
+	rcvr   Value
+}
+*/
+
+// makeMethodValue converts v from the rcvr+method index representation
+// of a method value to an actual method func value, which is
+// basically the receiver value with a special bit set, into a true
+// func value - a value holding an actual func. The output is
+// semantically equivalent to the input as far as the user of package
+// reflect can tell, but the true func representation can be handled
+// by code like Convert and Interface and Assign.
+func makeMethodValue(op string, v Value) Value {
+	/*
+	if v.flag&flagMethod == 0 {
+		panic("reflect: internal error: invalid use of makeMethodValue")
+	}
+
+	// Ignoring the flagMethod bit, v describes the receiver, not the method type.
+	fl := v.flag & (flagRO | flagAddr | flagIndir)
+	fl |= flag(v.typ().Kind())
+	rcvr := Value{v.typ(), v.ptr, fl}
+
+	// v.Type returns the actual type of the method value.
+	ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
+
+	code := methodValueCallCodePtr()
+
+	// methodValue contains a stack map for use by the runtime
+	_, _, abid := funcLayout(ftyp, nil)
+	fv := &methodValue{
+		makeFuncCtxt: makeFuncCtxt{
+			fn:      code,
+			stack:   abid.stackPtrs,
+			argLen:  abid.stackCallArgsSize,
+			regPtrs: abid.inRegPtrs,
+		},
+		method: int(v.flag) >> flagMethodShift,
+		rcvr:   rcvr,
+	}
+
+	// Cause panic if method is not appropriate.
+	// The panic would still happen during the call if we omit this,
+	// but we want Interface() and other operations to fail early.
+	methodReceiver(op, fv.rcvr, fv.method)
+
+	return Value{ftyp.Common(), unsafe.Pointer(fv), v.flag&flagRO | flag(Func)}
+	*/
+	panic("todo")
+}
+
+/*
+func methodValueCallCodePtr() uintptr {
+	return abi.FuncPCABI0(methodValueCall)
+}
+
+// methodValueCall is an assembly function that is the code half of
+// the function returned from makeMethodValue. It expects a *methodValue
+// as its context register, and its job is to invoke callMethod(ctxt, frame)
+// where ctxt is the context register and frame is a pointer to the first
+// word in the passed-in argument frame.
+func methodValueCall()
+
+// This structure must be kept in sync with runtime.reflectMethodValue.
+// Any changes should be reflected in both.
+type makeFuncCtxt struct {
+	fn      uintptr
+	stack   *bitVector // ptrmap for both stack args and results
+	argLen  uintptr    // just args
+	regPtrs abi.IntArgRegBitmap
+}
+
+// moveMakeFuncArgPtrs uses ctxt.regPtrs to copy integer pointer arguments
+// in args.Ints to args.Ptrs where the GC can see them.
+//
+// This is similar to what reflectcallmove does in the runtime, except
+// that happens on the return path, whereas this happens on the call path.
+//
+// nosplit because pointers are being held in uintptr slots in args, so
+// having our stack scanned now could lead to accidentally freeing
+// memory.
+//
+//go:nosplit
+func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
+	for i, arg := range args.Ints {
+		// Avoid write barriers! Because our write barrier enqueues what
+		// was there before, we might enqueue garbage.
+		if ctxt.regPtrs.Get(i) {
+			*(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
+		} else {
+			// We *must* zero this space ourselves because it's defined in
+			// assembly code and the GC will scan these pointers. Otherwise,
+			// there will be garbage here.
+			*(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0
+		}
+	}
+}
+*/
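
Most of this new file is still fenced off in comment blocks, with makeMethodValue reduced to panic("todo"). Once MakeFunc goes live, the natural smoke test is the swap example its doc comment mentions; a sketch against the upstream reflect API (not yet runnable on this port):

    package main

    import "reflect"

    func main() {
    	// One generic implementation, instantiated for a concrete
    	// func type via MakeFunc.
    	swap := func(in []reflect.Value) []reflect.Value {
    		return []reflect.Value{in[1], in[0]}
    	}
    	var intSwap func(int, int) (int, int)
    	fn := reflect.ValueOf(&intSwap).Elem()
    	fn.Set(reflect.MakeFunc(fn.Type(), swap))
    	a, b := intSwap(1, 2)
    	println(a, b) // 2 1
    }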

@@ -726,6 +726,17 @@ func (t *rtype) IsVariadic() bool {
 	panic("todo")
 }
 
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(p) + x)
+}
+
 // A StructField describes a single field in a struct.
 type StructField struct {
 	// Name is the field name.
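
add is the primitive behind arrayAt, introduced further down in this diff, which the new Index paths use: element i of a slice lives at data + i*eltSize. The same arithmetic spelled out by hand, assuming an []int32 backing array:

    package main

    import "unsafe"

    func main() {
    	xs := []int32{10, 20, 30}
    	base := unsafe.Pointer(&xs[0])
    	// base + i*eltSize, valid because i < len(xs)
    	p := unsafe.Pointer(uintptr(base) + 2*unsafe.Sizeof(xs[0]))
    	println(*(*int32)(p)) // 30
    }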

@@ -837,13 +848,13 @@ func TypeOf(i any) Type {
 	return toType((*abi.Type)(unsafe.Pointer(eface.typ)))
 }
 
-/* TODO(xsw):
 // rtypeOf directly extracts the *rtype of the provided value.
 func rtypeOf(i any) *abi.Type {
 	eface := *(*emptyInterface)(unsafe.Pointer(&i))
 	return eface.typ
 }
 
+/* TODO(xsw):
 // ptrMap is the cache for PointerTo.
 var ptrMap sync.Map // map[*rtype]*ptrType
 */
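
Moving rtypeOf out of the TODO comment is what lets later hunks in this diff define package-level type singletons without going through TypeOf:

    var bytesType = rtypeOf(([]byte)(nil))
    var uint8Type = rtypeOf(uint8(0))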

@@ -959,6 +970,241 @@ func (t *rtype) Comparable() bool {
 	return t.t.Equal != nil
 }
 
+// implements reports whether the type V implements the interface type T.
+func implements(T, V *abi.Type) bool {
+	if T.Kind() != abi.Interface {
+		return false
+	}
+	t := (*interfaceType)(unsafe.Pointer(T))
+	if len(t.Methods) == 0 {
+		return true
+	}
+
+	/*
+	// The same algorithm applies in both cases, but the
+	// method tables for an interface type and a concrete type
+	// are different, so the code is duplicated.
+	// In both cases the algorithm is a linear scan over the two
+	// lists - T's methods and V's methods - simultaneously.
+	// Since method tables are stored in a unique sorted order
+	// (alphabetical, with no duplicate method names), the scan
+	// through V's methods must hit a match for each of T's
+	// methods along the way, or else V does not implement T.
+	// This lets us run the scan in overall linear time instead of
+	// the quadratic time a naive search would require.
+	// See also ../runtime/iface.go.
+	if V.Kind() == abi.Interface {
+		v := (*interfaceType)(unsafe.Pointer(V))
+		i := 0
+		for j := 0; j < len(v.Methods); j++ {
+			tm := &t.Methods[i]
+			tmName := t.nameOff(tm.Name)
+			vm := &v.Methods[j]
+			vmName := nameOffFor(V, vm.Name)
+			if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
+				if !tmName.IsExported() {
+					tmPkgPath := pkgPath(tmName)
+					if tmPkgPath == "" {
+						tmPkgPath = t.PkgPath.Name()
+					}
+					vmPkgPath := pkgPath(vmName)
+					if vmPkgPath == "" {
+						vmPkgPath = v.PkgPath.Name()
+					}
+					if tmPkgPath != vmPkgPath {
+						continue
+					}
+				}
+				if i++; i >= len(t.Methods) {
+					return true
+				}
+			}
+		}
+		return false
+	}
+
+	v := V.Uncommon()
+	if v == nil {
+		return false
+	}
+	i := 0
+	vmethods := v.Methods()
+	for j := 0; j < int(v.Mcount); j++ {
+		tm := &t.Methods[i]
+		tmName := t.nameOff(tm.Name)
+		vm := vmethods[j]
+		vmName := nameOffFor(V, vm.Name)
+		if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
+			if !tmName.IsExported() {
+				tmPkgPath := pkgPath(tmName)
+				if tmPkgPath == "" {
+					tmPkgPath = t.PkgPath.Name()
+				}
+				vmPkgPath := pkgPath(vmName)
+				if vmPkgPath == "" {
+					vmPkgPath = nameOffFor(V, v.PkgPath).Name()
+				}
+				if tmPkgPath != vmPkgPath {
+					continue
+				}
+			}
+			if i++; i >= len(t.Methods) {
+				return true
+			}
+		}
+	}
+	return false
+	*/
+	panic("todo")
+}
+
+// specialChannelAssignability reports whether a value x of channel type V
+// can be directly assigned (using memmove) to another channel type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// T and V must be both of Chan kind.
+func specialChannelAssignability(T, V *abi.Type) bool {
+	/*
+	// Special case:
+	// x is a bidirectional channel value, T is a channel type,
+	// x's type V and T have identical element types,
+	// and at least one of V or T is not a defined type.
+	return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
+	*/
+	panic("todo")
+}
+
+// directlyAssignable reports whether a value x of type V can be directly
+// assigned (using memmove) to a value of type T.
+// https://golang.org/doc/go_spec.html#Assignability
+// Ignoring the interface rules (implemented elsewhere)
+// and the ideal constant rules (no ideal constants at run time).
+func directlyAssignable(T, V *abi.Type) bool {
+	// x's type V is identical to T?
+	if T == V {
+		return true
+	}
+
+	// Otherwise at least one of T and V must not be defined
+	// and they must have the same kind.
+	if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
+		return false
+	}
+
+	if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
+		return true
+	}
+
+	// x's type T and V must have identical underlying types.
+	return haveIdenticalUnderlyingType(T, V, true)
+}
+
+func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
+	if cmpTags {
+		return T == V
+	}
+
+	if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
+		return false
+	}
+
+	return haveIdenticalUnderlyingType(T, V, false)
+}
+
+func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
+	if T == V {
+		return true
+	}
+
+	kind := Kind(T.Kind())
+	if kind != Kind(V.Kind()) {
+		return false
+	}
+
+	// Non-composite types of equal kind have same underlying type
+	// (the predefined instance of the type).
+	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
+		return true
+	}
+
+	/*
+	// Composite types.
+	switch kind {
+	case Array:
+		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+	case Chan:
+		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+	case Func:
+		t := (*funcType)(unsafe.Pointer(T))
+		v := (*funcType)(unsafe.Pointer(V))
+		if t.OutCount != v.OutCount || t.InCount != v.InCount {
+			return false
+		}
+		for i := 0; i < t.NumIn(); i++ {
+			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
+				return false
+			}
+		}
+		for i := 0; i < t.NumOut(); i++ {
+			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
+				return false
+			}
+		}
+		return true
+
+	case Interface:
+		t := (*interfaceType)(unsafe.Pointer(T))
+		v := (*interfaceType)(unsafe.Pointer(V))
+		if len(t.Methods) == 0 && len(v.Methods) == 0 {
+			return true
+		}
+		// Might have the same methods but still
+		// need a run time conversion.
+		return false
+
+	case Map:
+		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+	case Pointer, Slice:
+		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
+
+	case Struct:
+		t := (*structType)(unsafe.Pointer(T))
+		v := (*structType)(unsafe.Pointer(V))
+		if len(t.Fields) != len(v.Fields) {
+			return false
+		}
+		if t.PkgPath.Name() != v.PkgPath.Name() {
+			return false
+		}
+		for i := range t.Fields {
+			tf := &t.Fields[i]
+			vf := &v.Fields[i]
+			if tf.Name.Name() != vf.Name.Name() {
+				return false
+			}
+			if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
+				return false
+			}
+			if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
+				return false
+			}
+			if tf.Offset != vf.Offset {
+				return false
+			}
+			if tf.Embedded() != vf.Embedded() {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+	*/
+	panic("todo")
+}
+
 // SliceOf returns the slice type with element type t.
 // For example, if t represents int, SliceOf(t) represents []int.
 func SliceOf(t Type) Type {
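
The commented-out implements body leans on method tables being sorted by name with no duplicates, which is what makes a single merged scan sufficient. The same idea in miniature, over plain sorted string slices (hypothetical method names, not the real tables):

    package main

    // containsAll reports whether sorted list v covers every name in sorted
    // list t, scanning both lists once -- the linear-time idea implements uses.
    func containsAll(t, v []string) bool {
    	i := 0
    	for j := 0; j < len(v) && i < len(t); j++ {
    		if v[j] == t[i] {
    			i++
    		}
    	}
    	return i == len(t)
    }

    func main() {
    	println(containsAll([]string{"Read", "Write"}, []string{"Close", "Read", "Write"})) // true
    	println(containsAll([]string{"Read", "Seek"}, []string{"Close", "Read", "Write"}))  // false
    }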

@@ -2,11 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package unsafeheader contains header declarations for the Go runtime's slice
-// and string implementations.
-//
-// This package allows packages that cannot import "reflect" to use types that
-// are tested to be equivalent to reflect.SliceHeader and reflect.StringHeader.
 package reflect
 
 import (

@@ -193,6 +193,28 @@ func (e *ValueError) Error() string {
 	return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
 }
 
+// valueMethodName returns the name of the exported calling method on Value.
+func valueMethodName() string {
+	/* TODO(xsw):
+	var pc [5]uintptr
+	n := runtime.Callers(1, pc[:])
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	for more := true; more; {
+		const prefix = "reflect.Value."
+		frame, more = frames.Next()
+		name := frame.Function
+		if len(name) > len(prefix) && name[:len(prefix)] == prefix {
+			methodName := name[len(prefix):]
+			if len(methodName) > 0 && 'A' <= methodName[0] && methodName[0] <= 'Z' {
+				return name
+			}
+		}
+	}
+	*/
+	return "unknown method"
+}
+
 // emptyInterface is the header for an interface{} value.
 type emptyInterface struct {
 	typ *abi.Type

@@ -212,6 +234,125 @@ type nonEmptyInterface struct {
 	word unsafe.Pointer
 }
 
+// mustBe panics if f's kind is not expected.
+// Making this a method on flag instead of on Value
+// (and embedding flag in Value) means that we can write
+// the very clear v.mustBe(Bool) and have it compile into
+// v.flag.mustBe(Bool), which will only bother to copy the
+// single important word for the receiver.
+func (f flag) mustBe(expected Kind) {
+	// TODO(mvdan): use f.kind() again once mid-stack inlining gets better
+	if Kind(f&flagKindMask) != expected {
+		panic(&ValueError{valueMethodName(), f.kind()})
+	}
+}
+
+// mustBeExported panics if f records that the value was obtained using
+// an unexported field.
+func (f flag) mustBeExported() {
+	if f == 0 || f&flagRO != 0 {
+		f.mustBeExportedSlow()
+	}
+}
+
+func (f flag) mustBeExportedSlow() {
+	if f == 0 {
+		panic(&ValueError{valueMethodName(), Invalid})
+	}
+	if f&flagRO != 0 {
+		panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+	}
+}
+
+// mustBeAssignable panics if f records that the value is not assignable,
+// which is to say that either it was obtained using an unexported field
+// or it is not addressable.
+func (f flag) mustBeAssignable() {
+	if f&flagRO != 0 || f&flagAddr == 0 {
+		f.mustBeAssignableSlow()
+	}
+}
+
+func (f flag) mustBeAssignableSlow() {
+	if f == 0 {
+		panic(&ValueError{valueMethodName(), Invalid})
+	}
+	// Assignable if addressable and not read-only.
+	if f&flagRO != 0 {
+		panic("reflect: " + valueMethodName() + " using value obtained using unexported field")
+	}
+	if f&flagAddr == 0 {
+		panic("reflect: " + valueMethodName() + " using unaddressable value")
+	}
+}
+
+// Addr returns a pointer value representing the address of v.
+// It panics if CanAddr() returns false.
+// Addr is typically used to obtain a pointer to a struct field
+// or slice element in order to call a method that requires a
+// pointer receiver.
+func (v Value) Addr() Value {
+	if v.flag&flagAddr == 0 {
+		panic("reflect.Value.Addr of unaddressable value")
+	}
+	// Preserve flagRO instead of using v.flag.ro() so that
+	// v.Addr().Elem() is equivalent to v (#32772)
+	fl := v.flag & flagRO
+	return Value{ptrTo(v.typ()), v.ptr, fl | flag(Pointer)}
+}
+
+// Bool returns v's underlying value.
+// It panics if v's kind is not Bool.
+func (v Value) Bool() bool {
+	// panicNotBool is split out to keep Bool inlineable.
+	if v.kind() != Bool {
+		v.panicNotBool()
+	}
+	return *(*bool)(v.ptr)
+}
+
+func (v Value) panicNotBool() {
+	v.mustBe(Bool)
+}
+
+var bytesType = rtypeOf(([]byte)(nil))
+
+// Bytes returns v's underlying value.
+// It panics if v's underlying value is not a slice of bytes or
+// an addressable array of bytes.
+func (v Value) Bytes() []byte {
+	// bytesSlow is split out to keep Bytes inlineable for unnamed []byte.
+	if v.typ_ == bytesType { // ok to use v.typ_ directly as comparison doesn't cause escape
+		return *(*[]byte)(v.ptr)
+	}
+	return v.bytesSlow()
+}
+
+func (v Value) bytesSlow() []byte {
+	/*
+	switch v.kind() {
+	case Slice:
+		if v.typ().Elem().Kind() != abi.Uint8 {
+			panic("reflect.Value.Bytes of non-byte slice")
+		}
+		// Slice is always bigger than a word; assume flagIndir.
+		return *(*[]byte)(v.ptr)
+	case Array:
+		if v.typ().Elem().Kind() != abi.Uint8 {
+			panic("reflect.Value.Bytes of non-byte array")
+		}
+		if !v.CanAddr() {
+			panic("reflect.Value.Bytes of unaddressable byte array")
+		}
+		p := (*byte)(v.ptr)
+		n := int((*arrayType)(unsafe.Pointer(v.typ())).Len)
+		return unsafe.Slice(p, n)
+	}
+	panic(&ValueError{"reflect.Value.Bytes", v.kind()})
+	*/
+	panic("todo")
+}
+
 // CanFloat reports whether Float can be used without panicking.
 func (v Value) CanFloat() bool {
 	switch v.kind() {
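
The flagRO-preserving line in Addr exists so that round-tripping through a pointer is an identity, per the #32772 note in its comment. Under standard reflect semantics:

    package main

    import "reflect"

    func main() {
    	x := [3]int{7, 8, 9}
    	v := reflect.ValueOf(&x).Elem().Index(1) // addressable element
    	w := v.Addr().Elem()                     // should be equivalent to v
    	println(v.Int() == w.Int())              // true
    }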

@@ -235,14 +376,34 @@ func (v Value) Float() float64 {
 	panic(&ValueError{"reflect.Value.Float", v.kind()})
 }
 
-// TODO(xsw):
-// var uint8Type = rtypeOf(uint8(0))
+var uint8Type = rtypeOf(uint8(0))
 
 // Index returns v's i'th element.
 // It panics if v's Kind is not Array, Slice, or String or i is out of range.
 func (v Value) Index(i int) Value {
-	/*
 	switch v.kind() {
+	case Slice:
+		// Element flag same as Elem of Pointer.
+		// Addressable, indirect, possibly read-only.
+		s := (*unsafeheaderSlice)(v.ptr)
+		if uint(i) >= uint(s.Len) {
+			panic("reflect: slice index out of range")
+		}
+		tt := (*sliceType)(unsafe.Pointer(v.typ()))
+		typ := tt.Elem
+		val := arrayAt(s.Data, i, typ.Size(), "i < s.Len")
+		fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
+		return Value{typ, val, fl}
+
+	case String:
+		s := (*unsafeheaderString)(v.ptr)
+		if uint(i) >= uint(s.Len) {
+			panic("reflect: string index out of range")
+		}
+		p := arrayAt(s.Data, i, 1, "i < s.Len")
+		fl := v.flag.ro() | flag(Uint8) | flagIndir
+		return Value{uint8Type, p, fl}
+
 	case Array:
 		tt := (*arrayType)(unsafe.Pointer(v.typ()))
 		if uint(i) >= uint(tt.Len) {

@@ -259,32 +420,8 @@ func (v Value) Index(i int) Value {
 		val := add(v.ptr, offset, "same as &v[i], i < tt.len")
 		fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
 		return Value{typ, val, fl}
-
-	case Slice:
-		// Element flag same as Elem of Pointer.
-		// Addressable, indirect, possibly read-only.
-		s := (*unsafeheader.Slice)(v.ptr)
-		if uint(i) >= uint(s.Len) {
-			panic("reflect: slice index out of range")
-		}
-		tt := (*sliceType)(unsafe.Pointer(v.typ()))
-		typ := tt.Elem
-		val := arrayAt(s.Data, i, typ.Size(), "i < s.Len")
-		fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
-		return Value{typ, val, fl}
-
-	case String:
-		s := (*unsafeheader.String)(v.ptr)
-		if uint(i) >= uint(s.Len) {
-			panic("reflect: string index out of range")
-		}
-		p := arrayAt(s.Data, i, 1, "i < s.Len")
-		fl := v.flag.ro() | flag(Uint8) | flagIndir
-		return Value{uint8Type, p, fl}
 	}
 	panic(&ValueError{"reflect.Value.Index", v.kind()})
-	*/
-	panic("todo")
 }
 
 // CanInt reports whether Int can be used without panicking.
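
With the switch un-commented, Index now serves all three kinds; string indexing hands back uint8 Values built on the uint8Type singleton defined just above it. Assuming standard reflect semantics for Uint (not touched by this commit):

    package main

    import "reflect"

    func main() {
    	s := reflect.ValueOf("go")
    	println(s.Index(0).Uint()) // 103, the byte 'g'
    }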

@@ -300,23 +437,34 @@ func (v Value) CanInt() bool {
 // Int returns v's underlying value, as an int64.
 // It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
 func (v Value) Int() int64 {
-	k := v.kind()
+	f := v.flag
+	k := f.kind()
 	p := v.ptr
-	switch k {
-	case Int:
-		return int64(uintptr(p))
-	case Int8:
-		return int64(uintptr(p))
-	case Int16:
-		return int64(uintptr(p))
-	case Int32:
-		return int64(uintptr(p))
-	case Int64:
-		if unsafe.Sizeof(uintptr(0)) == 8 {
-			return int64(uintptr(p))
-		}
-		return *(*int64)(p)
-	}
+	if f&flagAddr != 0 {
+		switch k {
+		case Int:
+			return int64(*(*int)(p))
+		case Int8:
+			return int64(*(*int8)(p))
+		case Int16:
+			return int64(*(*int16)(p))
+		case Int32:
+			return int64(*(*int32)(p))
+		case Int64:
+			return *(*int64)(p)
+		}
+	} else if unsafe.Sizeof(uintptr(0)) == 8 {
+		if k >= Int && k <= Int64 {
+			return int64(uintptr(p))
+		}
+	} else {
+		if k >= Int && k <= Int32 {
+			return int64(uintptr(p))
+		}
+		if k == Int64 {
+			return *(*int64)(p)
+		}
+	}
 	panic(&ValueError{"reflect.Value.Int", v.kind()})
 }
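
This is the "Int fix" of the commit title. A Value can carry a small integer either in memory it points at (the flagAddr case, which is what Index on a slice element produces) or directly in its pointer-sized word. The old code always reinterpreted the word itself, so the demo's item.Int() would have returned garbage for addressable elements. The two encodings the new branches distinguish, sketched with plain locals (illustrative, not the real flag layout):

    package main

    import "unsafe"

    func main() {
    	n := 42

    	// flagAddr-style: ptr points at the integer, so dereference it
    	// (the fixed path).
    	p := unsafe.Pointer(&n)
    	println(*(*int)(p)) // 42

    	// word-style: the integer itself rides in the pointer-sized word,
    	// so the word is just reinterpreted (kept for non-addressable Values).
    	word := uintptr(n)
    	println(int(word)) // 42
    }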

@@ -594,6 +742,107 @@ func (v Value) lenNonSlice() int {
 	panic("todo")
 }
 
+// Set assigns x to the value v.
+// It panics if CanSet returns false.
+// As in Go, x's value must be assignable to v's type and
+// must not be derived from an unexported field.
+func (v Value) Set(x Value) {
+	v.mustBeAssignable()
+	x.mustBeExported() // do not let unexported x leak
+	var target unsafe.Pointer
+	if v.kind() == Interface {
+		target = v.ptr
+	}
+	x = x.assignTo("reflect.Set", v.typ(), target)
+	if x.flag&flagIndir != 0 {
+		if x.ptr == unsafe.Pointer(&runtime.ZeroVal[0]) {
+			typedmemclr(v.typ(), v.ptr)
+		} else {
+			typedmemmove(v.typ(), v.ptr, x.ptr)
+		}
+	} else {
+		*(*unsafe.Pointer)(v.ptr) = x.ptr
+	}
+}
+
+// SetBool sets v's underlying value.
+// It panics if v's Kind is not Bool or if CanSet() is false.
+func (v Value) SetBool(x bool) {
+	v.mustBeAssignable()
+	v.mustBe(Bool)
+	*(*bool)(v.ptr) = x
+}
+
+// SetBytes sets v's underlying value.
+// It panics if v's underlying value is not a slice of bytes.
+func (v Value) SetBytes(x []byte) {
+	v.mustBeAssignable()
+	v.mustBe(Slice)
+	if toRType(v.typ()).Elem().Kind() != Uint8 { // TODO add Elem method, fix mustBe(Slice) to return slice.
+		panic("reflect.Value.SetBytes of non-byte slice")
+	}
+	*(*[]byte)(v.ptr) = x
+}
+
+// setRunes sets v's underlying value.
+// It panics if v's underlying value is not a slice of runes (int32s).
+func (v Value) setRunes(x []rune) {
+	v.mustBeAssignable()
+	v.mustBe(Slice)
+	if v.typ().Elem().Kind() != abi.Int32 {
+		panic("reflect.Value.setRunes of non-rune slice")
+	}
+	*(*[]rune)(v.ptr) = x
+}
+
+// SetComplex sets v's underlying value to x.
+// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
+func (v Value) SetComplex(x complex128) {
+	v.mustBeAssignable()
+	switch k := v.kind(); k {
+	default:
+		panic(&ValueError{"reflect.Value.SetComplex", v.kind()})
+	case Complex64:
+		*(*complex64)(v.ptr) = complex64(x)
+	case Complex128:
+		*(*complex128)(v.ptr) = x
+	}
+}
+
+// SetFloat sets v's underlying value to x.
+// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
+func (v Value) SetFloat(x float64) {
+	v.mustBeAssignable()
+	switch k := v.kind(); k {
+	default:
+		panic(&ValueError{"reflect.Value.SetFloat", v.kind()})
+	case Float32:
+		*(*float32)(v.ptr) = float32(x)
+	case Float64:
+		*(*float64)(v.ptr) = x
+	}
+}
+
+// SetInt sets v's underlying value to x.
+// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
+func (v Value) SetInt(x int64) {
+	v.mustBeAssignable()
+	switch k := v.kind(); k {
+	default:
+		panic(&ValueError{"reflect.Value.SetInt", v.kind()})
+	case Int:
+		*(*int)(v.ptr) = int(x)
+	case Int8:
+		*(*int8)(v.ptr) = int8(x)
+	case Int16:
+		*(*int16)(v.ptr) = int16(x)
+	case Int32:
+		*(*int32)(v.ptr) = int32(x)
+	case Int64:
+		*(*int64)(v.ptr) = x
+	}
+}
+
 //go:linkname unsafe_New github.com/goplus/llgo/internal/runtime.New
 func unsafe_New(*abi.Type) unsafe.Pointer
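
The Set* family is what the new Append path writes through: Append calls s.Index(n+i).Set(v) on freshly extended, addressable elements. Minimal standalone usage, assuming standard reflect semantics:

    package main

    import "reflect"

    func main() {
    	x := 0
    	v := reflect.ValueOf(&x).Elem() // addressable, so CanSet holds
    	v.SetInt(7)
    	println(x) // 7
    }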

@@ -610,6 +859,94 @@ func ValueOf(i any) Value {
 	return unpackEface(i)
 }
 
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+	return add(p, uintptr(i)*eltSize, "i < len")
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation.
+//
+// It panics if v's Kind is not a Slice or if n is negative or too large to
+// allocate the memory.
+func (v Value) Grow(n int) {
+	v.mustBeAssignable()
+	v.mustBe(Slice)
+	v.grow(n)
+}
+
+// grow is identical to Grow but does not check for assignability.
+func (v Value) grow(n int) {
+	p := (*unsafeheaderSlice)(v.ptr)
+	oldLen := p.Len
+	switch {
+	case n < 0:
+		panic("reflect.Value.Grow: negative len")
+	case oldLen+n < 0:
+		panic("reflect.Value.Grow: slice overflow")
+	case oldLen+n > p.Cap:
+		t := v.typ().Elem()
+		*p = growslice(*p, n, int(t.Size_))
+		p.Len = oldLen // set oldLen back
+	}
+}
+
+// extendSlice extends a slice by n elements.
+//
+// Unlike Value.grow, which modifies the slice in place and
+// does not change the length of the slice in place,
+// extendSlice returns a new slice value with the length
+// incremented by the number of specified elements.
+func (v Value) extendSlice(n int) Value {
+	v.mustBeExported()
+	v.mustBe(Slice)
+
+	// Shallow copy the slice header to avoid mutating the source slice.
+	sh := *(*unsafeheaderSlice)(v.ptr)
+	s := &sh
+	v.ptr = unsafe.Pointer(s)
+	v.flag = flagIndir | flag(Slice) // equivalent flag to MakeSlice
+
+	v.grow(n) // fine to treat as assignable since we allocate a new slice header
+	s.Len += n
+	return v
+}
+
+// Append appends the values x to a slice s and returns the resulting slice.
+// As in Go, each x's value must be assignable to the slice's element type.
+func Append(s Value, x ...Value) Value {
+	s.mustBe(Slice)
+	n := s.Len()
+	s = s.extendSlice(len(x))
+	for i, v := range x {
+		s.Index(n + i).Set(v)
+	}
+	return s
+}
+
+// AppendSlice appends a slice t to a slice s and returns the resulting slice.
+// The slices s and t must have the same element type.
+func AppendSlice(s, t Value) Value {
+	/*
+	s.mustBe(Slice)
+	t.mustBe(Slice)
+	typesMustMatch("reflect.AppendSlice", s.Type().Elem(), t.Type().Elem())
+	ns := s.Len()
+	nt := t.Len()
+	s = s.extendSlice(nt)
+	Copy(s.Slice(ns, ns+nt), t)
+	return s
+	*/
+	panic("todo")
+}
+
 // Zero returns a Value representing the zero value for the specified type.
 // The result is different from the zero value of the Value struct,
 // which represents no value at all.
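
extendSlice's "shallow copy the slice header" step mirrors how the built-in append leaves the caller's header untouched even when the backing array is shared:

    package main

    func main() {
    	a := make([]int, 1, 4)
    	b := append(a, 2)       // may reuse a's backing array
    	println(len(a), len(b)) // 1 2 -- a's own header is unchanged
    }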

@@ -637,6 +974,79 @@ func Zero(typ Type) Value {
 // must match declarations in runtime/map.go.
 const maxZero = runtime.MaxZero
 
+// New returns a Value representing a pointer to a new zero value
+// for the specified type. That is, the returned Value's Type is PointerTo(typ).
+func New(typ Type) Value {
+	/*
+	if typ == nil {
+		panic("reflect: New(nil)")
+	}
+	t := &typ.(*rtype).t
+	pt := ptrTo(t)
+	if ifaceIndir(pt) {
+		// This is a pointer to a not-in-heap type.
+		panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
+	}
+	ptr := unsafe_New(t)
+	fl := flag(Pointer)
+	return Value{pt, ptr, fl}
+	*/
+	panic("todo")
+}
+
+// NewAt returns a Value representing a pointer to a value of the
+// specified type, using p as that pointer.
+func NewAt(typ Type, p unsafe.Pointer) Value {
+	fl := flag(Pointer)
+	t := typ.(*rtype)
+	return Value{t.ptrTo(), p, fl}
+}
+
+// assignTo returns a value v that can be assigned directly to dst.
+// It panics if v is not assignable to dst.
+// For a conversion to an interface type, target, if not nil,
+// is a suggested scratch space to use.
+// target must be initialized memory (or nil).
+func (v Value) assignTo(context string, dst *abi.Type, target unsafe.Pointer) Value {
+	if v.flag&flagMethod != 0 {
+		v = makeMethodValue(context, v)
+	}
+
+	switch {
+	case directlyAssignable(dst, v.typ()):
+		// Overwrite type so that they match.
+		// Same memory layout, so no harm done.
+		fl := v.flag&(flagAddr|flagIndir) | v.flag.ro()
+		fl |= flag(dst.Kind())
+		return Value{dst, v.ptr, fl}
+
+	case implements(dst, v.typ()):
+		if v.Kind() == Interface && v.IsNil() {
+			// A nil ReadWriter passed to nil Reader is OK,
+			// but using ifaceE2I below will panic.
+			// Avoid the panic by returning a nil dst (e.g., Reader) explicitly.
+			return Value{dst, nil, flag(Interface)}
+		}
+		/* TODO(xsw):
+		x := valueInterface(v, false)
+		if target == nil {
+			target = unsafe_New(dst)
+		}
+		if dst.NumMethod() == 0 {
+			*(*any)(target) = x
+		} else {
+			ifaceE2I(dst, x, target)
+		}
+		return Value{dst, target, flagIndir | flag(Interface)}
+		*/
+	}
+
+	// Failed.
+	// TODO(xsw):
+	// panic(context + ": value of type " + stringFor(v.typ()) + " is not assignable to type " + stringFor(dst))
+	panic("todo")
+}
+
 // memmove copies size bytes to dst from src. No write barriers are used.
 //
 //go:linkname memmove C.memmove
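
assignTo's first branch covers cases like a defined slice type flowing into its unnamed underlying type, where only the type word needs rewriting. Under standard reflect semantics (this port still panics on the paths marked todo):

    package main

    import "reflect"

    type IntSlice []int

    func main() {
    	var dst []int
    	v := reflect.ValueOf(&dst).Elem()
    	v.Set(reflect.ValueOf(IntSlice{1, 2})) // directly assignable: same layout
    	println(len(dst)) // 2
    }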

@@ -647,12 +1057,12 @@ func memmove(dst, src unsafe.Pointer, size uintptr)
 //go:linkname typedmemmove github.com/goplus/llgo/internal/runtime.Typedmemmove
 func typedmemmove(t *abi.Type, dst, src unsafe.Pointer)
 
-/* TODO(xsw):
 // typedmemclr zeros the value at ptr of type t.
 //
-//go:noescape
+//go:linkname typedmemclr github.com/goplus/llgo/internal/runtime.Typedmemclr
 func typedmemclr(t *abi.Type, ptr unsafe.Pointer)
 
+/* TODO(xsw):
 // typedmemclrpartial is like typedmemclr but assumes that
 // dst points off bytes into the value and only clears size bytes.
 //

@@ -675,7 +1085,7 @@ func typedarrayclear(elemType *abi.Type, ptr unsafe.Pointer, len int)
 func typehash(t *abi.Type, p unsafe.Pointer, h uintptr) uintptr
 
 func verifyNotInHeapPtr(p uintptr) bool
 
-//go:noescape
-func growslice(t *abi.Type, old unsafeheaderSlice, num int) unsafeheaderSlice
 */
+//go:linkname growslice github.com/goplus/llgo/internal/runtime.GrowSlice
+func growslice(src unsafeheaderSlice, num, etSize int) unsafeheaderSlice

@@ -286,6 +286,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 	}
 	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
 }
+*/
 
 // typedmemclr clears the typed memory at ptr with type typ. The
 // memory at ptr must already be initialized (and hence in type-safe

@@ -296,20 +297,11 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 // call memclrHasPointers.
 //
 // TODO: A "go:nosplitrec" annotation would be perfect for this.
-//
-//go:nosplit
-func typedmemclr(typ *_type, ptr unsafe.Pointer) {
-	if writeBarrier.needed && typ.PtrBytes != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
-	}
-	memclrNoHeapPointers(ptr, typ.Size_)
-}
-
-//go:linkname reflect_typedmemclr reflect.typedmemclr
-func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
-	typedmemclr(typ, ptr)
+func Typedmemclr(typ *Type, ptr unsafe.Pointer) {
+	c.Memset(ptr, 0, typ.Size_)
 }
 
+/*
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
 	if writeBarrier.needed && typ.PtrBytes != 0 {
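
Typedmemclr collapses to a plain memset here: upstream must run bulkBarrierPreWrite before clearing pointer-bearing memory, while this runtime clears unconditionally. A portable-Go sketch of what the c.Memset call does:

    package main

    import "unsafe"

    // memclr zeroes size bytes at ptr.
    func memclr(ptr unsafe.Pointer, size uintptr) {
    	b := unsafe.Slice((*byte)(ptr), size)
    	for i := range b {
    		b[i] = 0
    	}
    }

    func main() {
    	x := [4]int32{1, 2, 3, 4}
    	memclr(unsafe.Pointer(&x), unsafe.Sizeof(x))
    	println(x[0], x[3]) // 0 0
    }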

(deleted file)
@@ -1,40 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// nextslicecap computes the next appropriate slice length.
-func nextslicecap(newLen, oldCap int) int {
-	newcap := oldCap
-	doublecap := newcap + newcap
-	if newLen > doublecap {
-		return newLen
-	}
-
-	const threshold = 256
-	if oldCap < threshold {
-		return doublecap
-	}
-	for {
-		// Transition from growing 2x for small slices
-		// to growing 1.25x for large slices. This formula
-		// gives a smooth-ish transition between the two.
-		newcap += (newcap + 3*threshold) >> 2
-
-		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
-		// newLen is guaranteed to be larger than zero, hence
-		// when newcap overflows then `uint(newcap) > uint(newLen)`.
-		// This allows to check for both with the same comparison.
-		if uint(newcap) >= uint(newLen) {
-			break
-		}
-	}
-
-	// Set newcap to the requested cap when
-	// the newcap calculation overflowed.
-	if newcap <= 0 {
-		return newLen
-	}
-	return newcap
-}

@@ -51,7 +51,15 @@ func SliceAppend(src Slice, data unsafe.Pointer, num, etSize int) Slice {
 		return src
 	}
 	oldLen := src.len
-	newLen := src.len + num
+	src = GrowSlice(src, num, etSize)
+	c.Memcpy(c.Advance(src.data, oldLen*etSize), data, uintptr(num*etSize))
+	return src
+}
+
+// GrowSlice grows slice and returns the grown slice.
+func GrowSlice(src Slice, num, etSize int) Slice {
+	oldLen := src.len
+	newLen := oldLen + num
 	if newLen > src.cap {
 		newCap := nextslicecap(newLen, src.cap)
 		p := AllocZ(uintptr(newCap * etSize))

@@ -62,10 +70,44 @@ func SliceAppend(src Slice, data unsafe.Pointer, num, etSize int) Slice {
 		src.cap = newCap
 	}
 	src.len = newLen
-	c.Memcpy(c.Advance(src.data, oldLen*etSize), data, uintptr(num*etSize))
 	return src
 }
 
+// nextslicecap computes the next appropriate slice length.
+func nextslicecap(newLen, oldCap int) int {
+	newcap := oldCap
+	doublecap := newcap + newcap
+	if newLen > doublecap {
+		return newLen
+	}
+
+	const threshold = 256
+	if oldCap < threshold {
+		return doublecap
+	}
+	for {
+		// Transition from growing 2x for small slices
+		// to growing 1.25x for large slices. This formula
+		// gives a smooth-ish transition between the two.
+		newcap += (newcap + 3*threshold) >> 2
+
+		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
+		// newLen is guaranteed to be larger than zero, hence
+		// when newcap overflows then `uint(newcap) > uint(newLen)`.
+		// This allows to check for both with the same comparison.
+		if uint(newcap) >= uint(newLen) {
+			break
+		}
+	}
+
+	// Set newcap to the requested cap when
+	// the newcap calculation overflowed.
+	if newcap <= 0 {
+		return newLen
+	}
+	return newcap
+}
+
 // SliceCopy copy data to slice and returns a slice.
 func SliceCopy(dst Slice, data unsafe.Pointer, num int, etSize int) int {
 	n := dst.len
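
nextslicecap, moved here from the file deleted above, doubles small capacities and approaches 1.25x growth for large ones; the shift works out to newcap + newcap/4 + 192 per iteration. Worked values:

    // oldCap < 256:                 capacity simply doubles
    // oldCap = 512,  newLen = 513:  512  + (512+768)>>2  = 832  (~1.6x)
    // oldCap = 4096, newLen = 4097: 4096 + (4096+768)>>2 = 5312 (~1.3x)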