ardump: todo

This commit is contained in:
xushiwei
2024-04-19 13:29:56 +08:00
parent 6463e2cc2a
commit 4f5a656a9f
8 changed files with 405 additions and 12 deletions


@@ -1,259 +0,0 @@
/*
* Copyright (c) 2023 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar
import (
"bytes"
"debug/elf"
"debug/pe"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
wasm "github.com/aykevl/go-wasm"
"github.com/blakesmith/ar"
)
// Create creates an archive for static linking from a list of object files
// given as a parameter. It is equivalent to the following command:
//
// ar -rcs <archivePath> <objs...>
func Create(arfile string, objs []string) error {
f, err := os.Create(arfile)
if err != nil {
return err
}
err = Make(f, objs)
f.Close()
if err != nil {
os.Remove(arfile)
}
return err
}
// Make creates an archive for static linking from a list of object files
// given as a parameter. It is equivalent to the following command:
//
// ar -rcs <archivePath> <objs...>
func Make(arfile io.WriteSeeker, objs []string) error {
// Open the archive file.
arwriter := ar.NewWriter(arfile)
err := arwriter.WriteGlobalHeader()
if err != nil {
return err
}
// Open all object files and read the symbols for the symbol table.
symbolTable := []struct {
name string // symbol name
fileIndex int // index into objfiles
}{}
archiveOffsets := make([]int32, len(objs))
for i, objpath := range objs {
objfile, err := os.Open(objpath)
if err != nil {
return err
}
// Read the symbols and add them to the symbol table.
if dbg, err := elf.NewFile(objfile); err == nil {
symbols, err := dbg.Symbols()
if err != nil {
return err
}
for _, symbol := range symbols {
bind := elf.ST_BIND(symbol.Info)
if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK {
// Don't include local symbols (STB_LOCAL).
continue
}
if elf.ST_TYPE(symbol.Info) != elf.STT_FUNC && elf.ST_TYPE(symbol.Info) != elf.STT_OBJECT {
// Not a function or a data object.
continue
}
// Include in archive.
symbolTable = append(symbolTable, struct {
name string
fileIndex int
}{symbol.Name, i})
}
} else if dbg, err := pe.NewFile(objfile); err == nil {
for _, symbol := range dbg.Symbols {
if symbol.StorageClass != 2 { // 2 = IMAGE_SYM_CLASS_EXTERNAL; keep external symbols only
continue
}
if symbol.SectionNumber == 0 {
continue
}
symbolTable = append(symbolTable, struct {
name string
fileIndex int
}{symbol.Name, i})
}
} else if dbg, err := wasm.Parse(objfile); err == nil {
for _, s := range dbg.Sections {
switch section := s.(type) {
case *wasm.SectionImport:
for _, ln := range section.Entries {
if ln.Kind != wasm.ExtKindFunction {
// Not a function
continue
}
symbolTable = append(symbolTable, struct {
name string
fileIndex int
}{ln.Field, i})
}
}
}
} else {
return fmt.Errorf("failed to open file %s as ELF, PE/COFF or WebAssembly: %w", objpath, err)
}
// Close the file now, to avoid issues with too many open files (especially
// on macOS).
objfile.Close()
}
// Create the symbol table buffer.
// For some (sparse) details on the file format:
// https://en.wikipedia.org/wiki/Ar_(Unix)#System_V_(or_GNU)_variant
buf := &bytes.Buffer{}
binary.Write(buf, binary.BigEndian, int32(len(symbolTable)))
for range symbolTable {
// This is a placeholder index, it will be updated after all files have
// been written to the archive (see the end of this function).
err = binary.Write(buf, binary.BigEndian, int32(0))
if err != nil {
return err
}
}
for _, sym := range symbolTable {
_, err := buf.Write([]byte(sym.name + "\x00"))
if err != nil {
return err
}
}
for buf.Len()%2 != 0 {
// The symbol table must be padded to an even size.
// This appears to be required by lld.
buf.WriteByte(0)
}
// Write the symbol table.
err = arwriter.WriteHeader(&ar.Header{
Name: "/",
ModTime: time.Unix(0, 0),
Uid: 0,
Gid: 0,
Mode: 0,
Size: int64(buf.Len()),
})
if err != nil {
return err
}
// Keep track of the start of the symbol table.
symbolTableStart, err := arfile.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
// Write symbol table contents.
_, err = arfile.Write(buf.Bytes())
if err != nil {
return err
}
// Add all object files to the archive.
var copyBuf bytes.Buffer
for i, objpath := range objs {
objfile, err := os.Open(objpath)
if err != nil {
return err
}
defer objfile.Close()
// Record the member's start offset, to be written back into the symbol
// table once all files have been added.
offset, err := arfile.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
if int64(int32(offset)) != offset {
return errors.New("large archives (4GB+) not supported")
}
archiveOffsets[i] = int32(offset)
// Write the file header.
st, err := objfile.Stat()
if err != nil {
return err
}
err = arwriter.WriteHeader(&ar.Header{
Name: filepath.Base(objfile.Name()),
ModTime: time.Unix(0, 0),
Uid: 0,
Gid: 0,
Mode: 0644,
Size: st.Size(),
})
if err != nil {
return err
}
// Copy the file contents into the archive.
// First load all contents into a buffer, then write it all in one go to
// the archive file. This is a bit complicated, but is necessary because
// io.Copy can't deal with files that are of an odd size.
copyBuf.Reset()
n, err := io.Copy(&copyBuf, objfile)
if err != nil {
return fmt.Errorf("could not copy object file into ar file: %w", err)
}
if n != st.Size() {
return errors.New("file modified during ar creation")
}
_, err = arwriter.Write(copyBuf.Bytes())
if err != nil {
return fmt.Errorf("could not copy object file into ar file: %w", err)
}
// File is not needed anymore.
objfile.Close()
}
// Create symbol indices.
indicesBuf := &bytes.Buffer{}
for _, sym := range symbolTable {
err = binary.Write(indicesBuf, binary.BigEndian, archiveOffsets[sym.fileIndex])
if err != nil {
return err
}
}
// Overwrite placeholder indices.
if _, err = arfile.Seek(symbolTableStart+4, io.SeekStart); err == nil {
_, err = arfile.Write(indicesBuf.Bytes())
}
return err
}
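
For reference (not part of this commit), here is a compact sketch of the System V / GNU "/" symbol table that Make assembles above: a big-endian int32 symbol count, one big-endian int32 member offset per symbol (written as placeholders first and patched at the end), then the NUL-terminated symbol names, padded to an even length. The names and offsets passed in are illustrative.

package ar

import (
    "bytes"
    "encoding/binary"
)

// exampleSymtab builds the symbol-table buffer layout used by Make.
// offsets[i] is the archive offset of the member that defines names[i].
func exampleSymtab(names []string, offsets []int32) []byte {
    buf := new(bytes.Buffer)
    binary.Write(buf, binary.BigEndian, int32(len(names))) // symbol count
    for _, off := range offsets {
        binary.Write(buf, binary.BigEndian, off) // member header offset
    }
    for _, name := range names {
        buf.WriteString(name + "\x00") // NUL-terminated symbol name
    }
    if buf.Len()%2 != 0 {
        buf.WriteByte(0) // keep the symbol table even-aligned (required by lld)
    }
    return buf.Bytes()
}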

x/ar/common.go Normal file (43 lines)

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar
import (
"time"
)
const (
headerByteSize = 60
globalHeader = "!<arch>\n"
)
type Header struct {
Name string
ModTime time.Time
Uid int
Gid int
Mode int64
Size int64
}
type slicer []byte
func (sp *slicer) next(n int) (b []byte) {
s := *sp
b, *sp = s[0:n], s[n:]
return
}
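
As a side note (not part of this commit), the fixed 60-byte member header that headerByteSize refers to is carved into space-padded fields; the sketch below mirrors the field widths used by reader.go and writer.go later in this commit:

package ar

// exampleHeaderLayout shows how a raw ar member header (at least
// headerByteSize bytes) splits into its fixed-width fields with slicer.
// The widths match readHeader and WriteHeader.
func exampleHeaderLayout(raw []byte) (name, size []byte) {
    s := slicer(raw[:headerByteSize])
    name = s.next(16) // file name, space padded
    _ = s.next(12)    // modification time, decimal seconds
    _ = s.next(6)     // uid, decimal
    _ = s.next(6)     // gid, decimal
    _ = s.next(8)     // mode, octal
    size = s.next(10) // size in bytes, decimal
    _ = s.next(2)     // terminator "`\n"
    return name, size
}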

x/ar/reader.go Normal file (148 lines)

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar
import (
"io"
"strconv"
"time"
)
// Reader provides read access to an ar archive.
// Call Next to advance to the next file in the archive.
//
// Example:
// reader := NewReader(f)
// var buf bytes.Buffer
// for {
// _, err := reader.Next()
// if err == io.EOF {
// break
// }
// if err != nil {
// t.Errorf(err.Error())
// }
// io.Copy(&buf, reader)
// }
type Reader struct {
r io.Reader
nb int64
pad int64
}
// NewReader creates a Reader that reads from r, discarding the global ar header.
func NewReader(r io.Reader) *Reader {
io.CopyN(io.Discard, r, 8) // Discard global header
return &Reader{r: r}
}
func (rd *Reader) string(b []byte) string {
i := len(b) - 1
for i > 0 && b[i] == 32 {
i--
}
return string(b[0 : i+1])
}
func (rd *Reader) numeric(b []byte) int64 {
i := len(b) - 1
for i > 0 && b[i] == 32 {
i--
}
n, _ := strconv.ParseInt(string(b[0:i+1]), 10, 64)
return n
}
// octal parses the mode field, skipping the leading "100" (regular-file)
// prefix that Writer.octal prepends.
func (rd *Reader) octal(b []byte) int64 {
i := len(b) - 1
for i > 0 && b[i] == 32 {
i--
}
n, _ := strconv.ParseInt(string(b[3:i+1]), 8, 64)
return n
}
func (rd *Reader) skipUnread() error {
skip := rd.nb + rd.pad
rd.nb, rd.pad = 0, 0
if seeker, ok := rd.r.(io.Seeker); ok {
_, err := seeker.Seek(skip, io.SeekCurrent)
return err
}
_, err := io.CopyN(io.Discard, rd.r, skip)
return err
}
func (rd *Reader) readHeader() (*Header, error) {
headerBuf := make([]byte, headerByteSize)
if _, err := io.ReadFull(rd.r, headerBuf); err != nil {
return nil, err
}
header := new(Header)
s := slicer(headerBuf)
header.Name = rd.string(s.next(16))
header.ModTime = time.Unix(rd.numeric(s.next(12)), 0)
header.Uid = int(rd.numeric(s.next(6)))
header.Gid = int(rd.numeric(s.next(6)))
header.Mode = rd.octal(s.next(8))
header.Size = rd.numeric(s.next(10))
rd.nb = int64(header.Size)
if header.Size%2 == 1 {
rd.pad = 1
} else {
rd.pad = 0
}
return header, nil
}
// Next advances to the next file in the archive and returns a Header
// describing that file's metadata.
func (rd *Reader) Next() (*Header, error) {
err := rd.skipUnread()
if err != nil {
return nil, err
}
return rd.readHeader()
}
// Read data from the current entry in the archive.
func (rd *Reader) Read(b []byte) (n int, err error) {
if rd.nb == 0 {
return 0, io.EOF
}
if int64(len(b)) > rd.nb {
b = b[0:rd.nb]
}
n, err = rd.r.Read(b)
rd.nb -= int64(n)
return
}
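
A minimal usage sketch (not part of this commit) for the Reader above, written as if it lived in the same package; it iterates every member and collects its contents:

package ar

import (
    "bytes"
    "io"
)

// exampleReadAll walks an archive with Reader, returning each member's
// name and contents. io.EOF from Next marks the end of the archive.
func exampleReadAll(r io.Reader) (map[string][]byte, error) {
    rd := NewReader(r)
    files := make(map[string][]byte)
    for {
        hdr, err := rd.Next()
        if err == io.EOF {
            return files, nil
        }
        if err != nil {
            return nil, err
        }
        var buf bytes.Buffer
        if _, err := io.Copy(&buf, rd); err != nil {
            return nil, err
        }
        files[hdr.Name] = buf.Bytes()
    }
}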

x/ar/writer.go Normal file (121 lines)

@@ -0,0 +1,121 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar
import (
"errors"
"io"
"strconv"
)
var (
errWriteTooLong = errors.New("ar: write too long")
)
// Writer provides sequential writing of an ar archive.
// An ar archive is a sequence of header/file pairs.
// Call WriteHeader to begin writing a new file, then call Write to supply the file's data.
//
// Example:
// archive := ar.NewWriter(writer)
// archive.WriteGlobalHeader()
// header := new(ar.Header)
// header.Size = 15 // bytes
//
// if err := archive.WriteHeader(header); err != nil {
// return err
// }
//
// io.Copy(archive, data)
type Writer struct {
w io.Writer
nb int64 // number of unwritten bytes for the current file entry
}
// NewWriter creates a new ar Writer that writes to w.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
func (aw *Writer) numeric(b []byte, x int64) {
s := strconv.FormatInt(x, 10)
for len(s) < len(b) {
s = s + " "
}
copy(b, []byte(s))
}
// octal writes the mode field; the "100" prefix is the octal regular-file
// type bits (S_IFREG), so a mode of 0644 is written as "100644".
func (aw *Writer) octal(b []byte, x int64) {
s := "100" + strconv.FormatInt(x, 8)
for len(s) < len(b) {
s = s + " "
}
copy(b, []byte(s))
}
func (aw *Writer) string(b []byte, str string) {
s := str
for len(s) < len(b) {
s = s + " "
}
copy(b, []byte(s))
}
// Write writes to the current entry in the ar archive.
// It returns errWriteTooLong if more than hdr.Size
// bytes are written after a call to WriteHeader.
func (aw *Writer) Write(b []byte) (n int, err error) {
if int64(len(b)) > aw.nb {
b = b[0:aw.nb]
err = errWriteTooLong
}
n, werr := aw.w.Write(b)
aw.nb -= int64(n)
if werr != nil {
return n, werr
}
if len(b)%2 == 1 { // data size must be aligned to an even byte
n2, _ := aw.w.Write([]byte{'\n'})
return n + n2, err
}
return
}
// WriteGlobalHeader writes the archive magic "!<arch>\n".
func (aw *Writer) WriteGlobalHeader() error {
_, err := aw.w.Write([]byte(globalHeader))
return err
}
// WriteHeader writes the header to the underlying writer and prepares
// to receive the file payload.
func (aw *Writer) WriteHeader(hdr *Header) error {
aw.nb = int64(hdr.Size)
header := make([]byte, headerByteSize)
s := slicer(header)
aw.string(s.next(16), hdr.Name)
aw.numeric(s.next(12), hdr.ModTime.Unix())
aw.numeric(s.next(6), int64(hdr.Uid))
aw.numeric(s.next(6), int64(hdr.Gid))
aw.octal(s.next(8), hdr.Mode)
aw.numeric(s.next(10), hdr.Size)
aw.string(s.next(2), "`\n")
_, err := aw.w.Write(header)
return err
}
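
And a matching usage sketch (again not part of this commit) for the Writer, in the same package; it writes a single text member after the global header:

package ar

import (
    "io"
    "strings"
    "time"
)

// exampleWriteOne writes the global header followed by one archive member.
func exampleWriteOne(w io.Writer) error {
    aw := NewWriter(w)
    if err := aw.WriteGlobalHeader(); err != nil {
        return err
    }
    data := "hello, archive\n"
    hdr := &Header{
        Name:    "hello.txt",
        ModTime: time.Unix(0, 0),
        Mode:    0644,
        Size:    int64(len(data)),
    }
    if err := aw.WriteHeader(hdr); err != nil {
        return err
    }
    // Write pads the member to an even length automatically.
    _, err := io.Copy(aw, strings.NewReader(data))
    return err
}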


@@ -0,0 +1,40 @@
/*
* Copyright (c) 2024 The GoPlus Authors (goplus.org). All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package llexportdata
import (
"go/token"
"go/types"
"io"
)
// Read reads export data from in, decodes it, and returns type information for the package.
//
// The package path (effectively its linker symbol prefix) is specified by path, since unlike
// the package name, this information may not be recorded in the export data.
//
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references within the export data
// to other packages are consistent. The caller must ensure that imports[path] does not exist,
// or exists but is incomplete (see types.Package.Complete), and Read inserts the resulting package
// into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
panic("todo")
}
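
Read is still a stub in this commit, but a hypothetical caller would look roughly like the sketch below; the import path "example.com/x/llexportdata" is a placeholder, since the module path is not shown in this diff:

package main

import (
    "go/token"
    "go/types"
    "os"

    "example.com/x/llexportdata" // placeholder import path
)

// loadExportData opens an export-data file and decodes it with Read.
func loadExportData(file, pkgPath string) (*types.Package, error) {
    f, err := os.Open(file)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    fset := token.NewFileSet()
    imports := make(map[string]*types.Package)
    return llexportdata.Read(f, fset, imports, pkgPath)
}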