Initial commit: Go 1.23 release state

commit 17cd57a668
Author: Vorapol Rinsatitnon
Date:   2024-09-21 23:49:08 +10:00

13231 changed files with 3114330 additions and 0 deletions

src/bytes/boundary_test.go (new file)

@@ -0,0 +1,115 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
//go:build linux
package bytes_test
import (
. "bytes"
"syscall"
"testing"
)
// This file tests the situation where byte operations are checking
// data very near to a page boundary. We want to make sure those
// operations do not read across the boundary and cause a page
// fault where they shouldn't.
// These tests run only on linux. The code being tested is
// not OS-specific, so it does not need to be tested on all
// operating systems.
// dangerousSlice returns a slice which is immediately
// preceded and followed by a faulting page.
func dangerousSlice(t *testing.T) []byte {
pagesize := syscall.Getpagesize()
b, err := syscall.Mmap(0, 0, 3*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE)
if err != nil {
t.Fatalf("mmap failed %s", err)
}
err = syscall.Mprotect(b[:pagesize], syscall.PROT_NONE)
if err != nil {
t.Fatalf("mprotect low failed %s\n", err)
}
err = syscall.Mprotect(b[2*pagesize:], syscall.PROT_NONE)
if err != nil {
t.Fatalf("mprotect high failed %s\n", err)
}
return b[pagesize : 2*pagesize]
}
func TestEqualNearPageBoundary(t *testing.T) {
t.Parallel()
b := dangerousSlice(t)
for i := range b {
b[i] = 'A'
}
for i := 0; i <= len(b); i++ {
Equal(b[:i], b[len(b)-i:])
Equal(b[len(b)-i:], b[:i])
}
}
func TestIndexByteNearPageBoundary(t *testing.T) {
t.Parallel()
b := dangerousSlice(t)
for i := range b {
idx := IndexByte(b[i:], 1)
if idx != -1 {
t.Fatalf("IndexByte(b[%d:])=%d, want -1\n", i, idx)
}
}
}
func TestIndexNearPageBoundary(t *testing.T) {
t.Parallel()
q := dangerousSlice(t)
if len(q) > 64 {
// Only worry about when we're near the end of a page.
q = q[len(q)-64:]
}
b := dangerousSlice(t)
if len(b) > 256 {
// Only worry about when we're near the end of a page.
b = b[len(b)-256:]
}
for j := 1; j < len(q); j++ {
q[j-1] = 1 // difference is only found on the last byte
for i := range b {
idx := Index(b[i:], q[:j])
if idx != -1 {
t.Fatalf("Index(b[%d:], q[:%d])=%d, want -1\n", i, j, idx)
}
}
q[j-1] = 0
}
// Test differing alignments and sizes of q which always end on a page boundary.
q[len(q)-1] = 1 // difference is only found on the last byte
for j := 0; j < len(q); j++ {
for i := range b {
idx := Index(b[i:], q[j:])
if idx != -1 {
t.Fatalf("Index(b[%d:], q[%d:])=%d, want -1\n", i, j, idx)
}
}
}
q[len(q)-1] = 0
}
func TestCountNearPageBoundary(t *testing.T) {
t.Parallel()
b := dangerousSlice(t)
for i := range b {
c := Count(b[i:], []byte{1})
if c != 0 {
t.Fatalf("Count(b[%d:], {1})=%d, want 0\n", i, c)
}
c = Count(b[:i], []byte{0})
if c != i {
t.Fatalf("Count(b[:%d], {0})=%d, want %d\n", i, c, i)
}
}
}
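
The guard-page trick above generalizes: put the data in one writable page fenced on both sides by PROT_NONE pages, and any out-of-bounds read by the routine under test faults immediately instead of passing silently. The following is a minimal sketch of reusing that pattern in another Linux-only test; it is not part of this commit, and the names guardedSlice and TestEqualFoldNearPageBoundary are hypothetical.

//go:build linux

package mypkg_test

import (
    "bytes"
    "syscall"
    "testing"
)

// guardedSlice returns one page of writable memory whose neighboring pages
// are PROT_NONE, so any over-read by the code under test crashes the test.
func guardedSlice(t *testing.T) []byte {
    t.Helper()
    pagesize := syscall.Getpagesize()
    b, err := syscall.Mmap(-1, 0, 3*pagesize,
        syscall.PROT_READ|syscall.PROT_WRITE,
        syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE)
    if err != nil {
        t.Fatalf("mmap: %v", err)
    }
    if err := syscall.Mprotect(b[:pagesize], syscall.PROT_NONE); err != nil {
        t.Fatalf("mprotect low: %v", err)
    }
    if err := syscall.Mprotect(b[2*pagesize:], syscall.PROT_NONE); err != nil {
        t.Fatalf("mprotect high: %v", err)
    }
    return b[pagesize : 2*pagesize]
}

func TestEqualFoldNearPageBoundary(t *testing.T) {
    b := guardedSlice(t)
    for i := range b {
        b[i] = 'A'
    }
    // Any read past either end of b would hit a PROT_NONE page and fault.
    for i := 0; i <= len(b); i++ {
        bytes.EqualFold(b[:i], b[len(b)-i:])
    }
}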

src/bytes/buffer.go (new file)

@@ -0,0 +1,482 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes
// Simple byte buffer for marshaling data.
import (
"errors"
"io"
"unicode/utf8"
)
// smallBufferSize is an initial allocation minimal capacity.
const smallBufferSize = 64
// A Buffer is a variable-sized buffer of bytes with [Buffer.Read] and [Buffer.Write] methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
lastRead readOp // last read operation, so that Unread* can work correctly.
}
// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can check for
// invalid usage. opReadRuneX constants are chosen such that
// converted to int they correspond to the rune size that was read.
type readOp int8
// Don't use iota for these, as the values need to correspond with the
// names and comments, which is easier to see when being explicit.
const (
opRead readOp = -1 // Any other read operation.
opInvalid readOp = 0 // Non-read operation.
opReadRune1 readOp = 1 // Read rune of size 1.
opReadRune2 readOp = 2 // Read rune of size 2.
opReadRune3 readOp = 3 // Read rune of size 3.
opReadRune4 readOp = 4 // Read rune of size 4.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
const maxInt = int(^uint(0) >> 1)
// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
// The slice is valid for use only until the next buffer modification (that is,
// only until the next call to a method like [Buffer.Read], [Buffer.Write], [Buffer.Reset], or [Buffer.Truncate]).
// The slice aliases the buffer content at least until the next buffer modification,
// so immediate changes to the slice will affect the result of future reads.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// AvailableBuffer returns an empty buffer with b.Available() capacity.
// This buffer is intended to be appended to and
// passed to an immediately succeeding [Buffer.Write] call.
// The buffer is only valid until the next write operation on b.
func (b *Buffer) AvailableBuffer() []byte { return b.buf[len(b.buf):] }
// String returns the contents of the unread portion of the buffer
// as a string. If the [Buffer] is a nil pointer, it returns "<nil>".
//
// To build strings more efficiently, see the [strings.Builder] type.
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
return "<nil>"
}
return string(b.buf[b.off:])
}
// empty reports whether the unread portion of the buffer is empty.
func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Cap returns the capacity of the buffer's underlying byte slice, that is, the
// total space allocated for the buffer's data.
func (b *Buffer) Cap() int { return cap(b.buf) }
// Available returns how many bytes are unused in the buffer.
func (b *Buffer) Available() int { return cap(b.buf) - len(b.buf) }
// Truncate discards all but the first n unread bytes from the buffer
// but continues to use the same allocated storage.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
if n == 0 {
b.Reset()
return
}
b.lastRead = opInvalid
if n < 0 || n > b.Len() {
panic("bytes.Buffer: truncation out of range")
}
b.buf = b.buf[:b.off+n]
}
// Reset resets the buffer to be empty,
// but it retains the underlying storage for use by future writes.
// Reset is the same as [Buffer.Truncate](0).
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.off = 0
b.lastRead = opInvalid
}
// tryGrowByReslice is an inlineable version of grow for the fast-case where the
// internal buffer only needs to be resliced.
// It returns the index where bytes should be written and whether it succeeded.
func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
b.buf = b.buf[:l+n]
return l, true
}
return 0, false
}
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
b.Reset()
}
// Try to grow by means of a reslice.
if i, ok := b.tryGrowByReslice(n); ok {
return i
}
if b.buf == nil && n <= smallBufferSize {
b.buf = make([]byte, n, smallBufferSize)
return 0
}
c := cap(b.buf)
if n <= c/2-m {
// We can slide things down instead of allocating a new
// slice. We only need m+n <= c to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf, b.buf[b.off:])
} else if c > maxInt-c-n {
panic(ErrTooLarge)
} else {
// Add b.off to account for b.buf[:b.off] being sliced off the front.
b.buf = growSlice(b.buf[b.off:], b.off+n)
}
// Restore b.off and len(b.buf).
b.off = 0
b.buf = b.buf[:m+n]
return m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with [ErrTooLarge].
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("bytes.Buffer.Grow: negative count")
}
m := b.grow(n)
b.buf = b.buf[:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with [ErrTooLarge].
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(p))
if !ok {
m = b.grow(len(p))
}
return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with [ErrTooLarge].
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(len(s))
if !ok {
m = b.grow(len(s))
}
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a [Buffer.Read] call by
// [Buffer.ReadFrom]. As long as the [Buffer] has at least MinRead bytes beyond
// what is required to hold the contents of r, [Buffer.ReadFrom] will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with [ErrTooLarge].
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
for {
i := b.grow(MinRead)
b.buf = b.buf[:i]
m, e := r.Read(b.buf[i:cap(b.buf)])
if m < 0 {
panic(errNegativeRead)
}
b.buf = b.buf[:i+m]
n += int64(m)
if e == io.EOF {
return n, nil // e is EOF, so return nil explicitly
}
if e != nil {
return n, e
}
}
}
// growSlice grows b by n, preserving the original content of b.
// If the allocation fails, it panics with ErrTooLarge.
func growSlice(b []byte, n int) []byte {
defer func() {
if recover() != nil {
panic(ErrTooLarge)
}
}()
// TODO(http://golang.org/issue/51462): We should rely on the append-make
// pattern so that the compiler can call runtime.growslice. For example:
// return append(b, make([]byte, n)...)
// This avoids unnecessary zero-ing of the first len(b) bytes of the
// allocated slice, but this pattern causes b to escape onto the heap.
//
// Instead use the append-make pattern with a nil slice to ensure that
// we allocate buffers rounded up to the closest size class.
c := len(b) + n // ensure enough space for n elements
if c < 2*cap(b) {
// The growth rate has historically always been 2x. In the future,
// we could rely purely on append to determine the growth rate.
c = 2 * cap(b)
}
b2 := append([]byte(nil), make([]byte, c)...)
copy(b2, b)
return b2[:len(b)]
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the [io.WriterTo] interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
if nBytes := b.Len(); nBytes > 0 {
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("bytes.Buffer.WriteTo: invalid Write count")
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all bytes should have been written, by definition of
// Write method in io.Writer
if m != nBytes {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Reset()
return n, nil
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match [bufio.Writer]'s
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// [ErrTooLarge].
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(1)
if !ok {
m = b.grow(1)
}
b.buf[m] = c
return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match [bufio.Writer]'s WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with [ErrTooLarge].
func (b *Buffer) WriteRune(r rune) (n int, err error) {
// Compare as uint32 to correctly handle negative runes.
if uint32(r) < utf8.RuneSelf {
b.WriteByte(byte(r))
return 1, nil
}
b.lastRead = opInvalid
m, ok := b.tryGrowByReslice(utf8.UTFMax)
if !ok {
m = b.grow(utf8.UTFMax)
}
b.buf = utf8.AppendRune(b.buf[:m], r)
return len(b.buf) - m, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is [io.EOF] (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
if len(p) == 0 {
return 0, nil
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
if n > 0 {
b.lastRead = opRead
}
return n, nil
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by [Buffer.Read].
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
b.lastRead = opInvalid
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
if n > 0 {
b.lastRead = opRead
}
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error [io.EOF].
func (b *Buffer) ReadByte() (byte, error) {
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, io.EOF
}
c := b.buf[b.off]
b.off++
b.lastRead = opRead
return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
if b.empty() {
// Buffer is empty, reset to recover space.
b.Reset()
return 0, 0, io.EOF
}
c := b.buf[b.off]
if c < utf8.RuneSelf {
b.off++
b.lastRead = opReadRune1
return rune(c), 1, nil
}
r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n
b.lastRead = readOp(n)
return r, n, nil
}
// UnreadRune unreads the last rune returned by [Buffer.ReadRune].
// If the most recent read or write operation on the buffer was
// not a successful [Buffer.ReadRune], UnreadRune returns an error. (In this regard
// it is stricter than [Buffer.UnreadByte], which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead <= opInvalid {
return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
}
if b.off >= int(b.lastRead) {
b.off -= int(b.lastRead)
}
b.lastRead = opInvalid
return nil
}
var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
// UnreadByte unreads the last byte returned by the most recent successful
// read operation that read at least one byte. If a write has happened since
// the last read, if the last read returned an error, or if the read read zero
// bytes, UnreadByte returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead == opInvalid {
return errUnreadByte
}
b.lastRead = opInvalid
if b.off > 0 {
b.off--
}
return nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return line, err
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
i := IndexByte(b.buf[b.off:], delim)
end := b.off + i + 1
if i < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
b.lastRead = opRead
return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often [io.EOF]).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
slice, err := b.readSlice(delim)
return string(slice), err
}
// NewBuffer creates and initializes a new [Buffer] using buf as its
// initial contents. The new [Buffer] takes ownership of buf, and the
// caller should not use buf after this call. NewBuffer is intended to
// prepare a [Buffer] to read existing data. It can also be used to set
// the initial size of the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
// sufficient to initialize a [Buffer].
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new [Buffer] using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is
// sufficient to initialize a [Buffer].
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}
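
The doc comments for Grow, AvailableBuffer, and Write above describe an append-in-place pattern: pre-grow the buffer, append into the spare capacity returned by AvailableBuffer, then hand that slice straight back to Write. A small usage sketch of that pattern (not part of this commit):

package main

import (
    "bytes"
    "fmt"
    "strconv"
)

func main() {
    var b bytes.Buffer // the zero value is ready to use
    b.Grow(64)         // reserve space so the loop below never reallocates

    for i := 0; i < 10; i++ {
        // AvailableBuffer returns the unused capacity; appending into it and
        // passing the result to Write keeps everything in the buffer's own
        // storage, so no temporary slice has to be allocated.
        tmp := b.AvailableBuffer()
        tmp = strconv.AppendInt(tmp, int64(i), 10)
        tmp = append(tmp, ',')
        b.Write(tmp)
    }
    fmt.Println(b.String()) // 0,1,2,3,4,5,6,7,8,9,
}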

src/bytes/buffer_test.go (new file)

@@ -0,0 +1,747 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
. "bytes"
"fmt"
"internal/testenv"
"io"
"math/rand"
"strconv"
"testing"
"unicode/utf8"
)
const N = 10000 // make this bigger for a larger (and slower) test
var testString string // test data for write tests
var testBytes []byte // test data; same as testString but as a slice.
type negativeReader struct{}
func (r *negativeReader) Read([]byte) (int, error) { return -1, nil }
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
testString = string(testBytes)
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *Buffer, s string) {
bytes := buf.Bytes()
str := buf.String()
if buf.Len() != len(bytes) {
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
}
if buf.Len() != len(str) {
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
}
if string(bytes) != s {
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
}
}
// Fill buf through n writes of string fus.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.WriteString(fus)
if m != len(fus) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += fus
check(t, testname+" (fill 4)", buf, s)
}
return s
}
// Fill buf through n writes of byte slice fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.Write(fub)
if m != len(fub) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += string(fub)
check(t, testname+" (fill 4)", buf, s)
}
return s
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
check(t, "NewBuffer", buf, testString)
}
var buf Buffer
// Calling NewBuffer and immediately shallow copying the Buffer struct
// should not result in any allocations.
// This can be used to reset the underlying []byte of an existing Buffer.
func TestNewBufferShallow(t *testing.T) {
testenv.SkipIfOptimizationOff(t)
n := testing.AllocsPerRun(1000, func() {
buf = *NewBuffer(testBytes)
})
if n > 0 {
t.Errorf("allocations occurred while shallow copying")
}
check(t, "NewBuffer", &buf, testString)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(testString)
check(t, "NewBufferString", buf, testString)
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 1)", buf, s)
for {
n, err := buf.Read(fub)
if n == 0 {
break
}
if err != nil {
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
}
s = s[n:]
check(t, testname+" (empty 3)", buf, s)
}
check(t, testname+" (empty 4)", buf, "")
}
func TestBasicOperations(t *testing.T) {
var buf Buffer
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "")
buf.Reset()
check(t, "TestBasicOperations (2)", &buf, "")
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
n, err := buf.Write(testBytes[0:1])
if want := 1; err != nil || n != want {
t.Errorf("Write: got (%d, %v), want (%d, %v)", n, err, want, nil)
}
check(t, "TestBasicOperations (4)", &buf, "a")
buf.WriteByte(testString[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
n, err = buf.Write(testBytes[2:26])
if want := 24; err != nil || n != want {
t.Errorf("Write: got (%d, %v), want (%d, %v)", n, err, want, nil)
}
check(t, "TestBasicOperations (6)", &buf, testString[0:26])
buf.Truncate(26)
check(t, "TestBasicOperations (7)", &buf, testString[0:26])
buf.Truncate(20)
check(t, "TestBasicOperations (8)", &buf, testString[0:20])
empty(t, "TestBasicOperations (9)", &buf, testString[0:20], make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
buf.WriteByte(testString[1])
c, err := buf.ReadByte()
if want := testString[1]; err != nil || c != want {
t.Errorf("ReadByte: got (%q, %v), want (%q, %v)", c, err, want, nil)
}
c, err = buf.ReadByte()
if err != io.EOF {
t.Errorf("ReadByte: got (%q, %v), want (%q, %v)", c, err, byte(0), io.EOF)
}
}
}
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, testString)
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(testString)/i))
}
check(t, "TestLargeStringWrites (3)", &buf, "")
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(testString)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
}
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(testString))
if i%2 == 0 {
s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testString[0:wlen])
} else {
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
}
rlen := rand.Intn(len(testString))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
func TestCapWithPreallocatedSlice(t *testing.T) {
buf := NewBuffer(make([]byte, 10))
n := buf.Cap()
if n != 10 {
t.Errorf("expected 10, got %d", n)
}
}
func TestCapWithSliceAndWrittenData(t *testing.T) {
buf := NewBuffer(make([]byte, 0, 10))
buf.Write([]byte("test"))
n := buf.Cap()
if n != 10 {
t.Errorf("expected 10, got %d", n)
}
}
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
t.Errorf("expected <nil>; got %q", b.String())
}
}
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString)))
}
}
type panicReader struct{ panic bool }
func (r panicReader) Read(p []byte) (int, error) {
if r.panic {
panic("oops")
}
return 0, io.EOF
}
// Make sure that an empty Buffer remains empty when
// it is "grown" before a Read that panics
func TestReadFromPanicReader(t *testing.T) {
// First verify non-panic behaviour
var buf Buffer
i, err := buf.ReadFrom(panicReader{})
if err != nil {
t.Fatal(err)
}
if i != 0 {
t.Fatalf("unexpected return from bytes.ReadFrom (1): got: %d, want %d", i, 0)
}
check(t, "TestReadFromPanicReader (1)", &buf, "")
// Confirm that when Reader panics, the empty buffer remains empty
var buf2 Buffer
defer func() {
recover()
check(t, "TestReadFromPanicReader (2)", &buf2, "")
}()
buf2.ReadFrom(panicReader{panic: true})
}
func TestReadFromNegativeReader(t *testing.T) {
var b Buffer
defer func() {
switch err := recover().(type) {
case nil:
t.Fatal("bytes.Buffer.ReadFrom didn't panic")
case error:
// this is the error string of errNegativeRead
wantError := "bytes.Buffer: reader returned negative count from Read"
if err.Error() != wantError {
t.Fatalf("recovered panic: got %v, want %v", err.Error(), wantError)
}
default:
t.Fatalf("unexpected panic value: %#v", err)
}
}()
b.ReadFrom(new(negativeReader))
}
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString)))
}
}
func TestWriteAppend(t *testing.T) {
var got Buffer
var want []byte
for i := 0; i < 1000; i++ {
b := got.AvailableBuffer()
b = strconv.AppendInt(b, int64(i), 10)
want = strconv.AppendInt(want, int64(i), 10)
got.Write(b)
}
if !Equal(got.Bytes(), want) {
t.Fatalf("Bytes() = %q, want %q", got, want)
}
// With a sufficiently sized buffer, there should be no allocations.
n := testing.AllocsPerRun(100, func() {
got.Reset()
for i := 0; i < 1000; i++ {
b := got.AvailableBuffer()
b = strconv.AppendInt(b, int64(i), 10)
got.Write(b)
}
})
if n > 0 {
t.Errorf("allocations occurred while appending")
}
}
func TestRuneIO(t *testing.T) {
const NRune = 1000
// Built a test slice while we write the data
b := make([]byte, utf8.UTFMax*NRune)
var buf Buffer
n := 0
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(b[n:], r)
nbytes, err := buf.WriteRune(r)
if err != nil {
t.Fatalf("WriteRune(%U) error: %s", r, err)
}
if nbytes != size {
t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes)
}
n += size
}
b = b[0:n]
// Check the resulting bytes
if !Equal(buf.Bytes(), b) {
t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b)
}
p := make([]byte, utf8.UTFMax)
// Read it back with ReadRune
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(p, r)
nr, nbytes, err := buf.ReadRune()
if nr != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err)
}
}
// Check that UnreadRune works
buf.Reset()
// check at EOF
if err := buf.UnreadRune(); err == nil {
t.Fatal("UnreadRune at EOF: got no error")
}
if _, _, err := buf.ReadRune(); err == nil {
t.Fatal("ReadRune at EOF: got no error")
}
if err := buf.UnreadRune(); err == nil {
t.Fatal("UnreadRune after ReadRune at EOF: got no error")
}
// check not at EOF
buf.Write(b)
for r := rune(0); r < NRune; r++ {
r1, size, _ := buf.ReadRune()
if err := buf.UnreadRune(); err != nil {
t.Fatalf("UnreadRune(%U) got error %q", r, err)
}
r2, nbytes, err := buf.ReadRune()
if r1 != r2 || r1 != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err)
}
}
}
func TestWriteInvalidRune(t *testing.T) {
// Invalid runes, including negative ones, should be written as
// utf8.RuneError.
for _, r := range []rune{-1, utf8.MaxRune + 1} {
var buf Buffer
buf.WriteRune(r)
check(t, fmt.Sprintf("TestWriteInvalidRune (%d)", r), &buf, "\uFFFD")
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
for i := 0; i <= 5; i++ {
for j := i; j <= 5; j++ {
for k := 0; k <= 6; k++ {
// 0 <= i <= j <= 5; 0 <= k <= 6
// Check that if we start with a buffer
// of length j at offset i and ask for
// Next(k), we get the right bytes.
buf := NewBuffer(b[0:j])
n, _ := buf.Read(tmp[0:i])
if n != i {
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
want := k
if want > j-i {
want = j - i
}
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
for l, v := range bb {
if v != byte(l+i) {
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
}
}
}
}
}
}
var readBytesTests = []struct {
buffer string
delim byte
expected []string
err error
}{
{"", 0, []string{""}, io.EOF},
{"a\x00", 0, []string{"a\x00"}, nil},
{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
{"hello\x01world", 1, []string{"hello\x01"}, nil},
{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
}
func TestReadBytes(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var bytes []byte
bytes, err = buf.ReadBytes(test.delim)
if string(bytes) != expected {
t.Errorf("expected %q, got %q", expected, bytes)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func TestReadString(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var s string
s, err = buf.ReadString(test.delim)
if s != expected {
t.Errorf("expected %q, got %q", expected, s)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func BenchmarkReadString(b *testing.B) {
const n = 32 << 10
data := make([]byte, n)
data[n-1] = 'x'
b.SetBytes(int64(n))
for i := 0; i < b.N; i++ {
buf := NewBuffer(data)
_, err := buf.ReadString('x')
if err != nil {
b.Fatal(err)
}
}
}
func TestGrow(t *testing.T) {
x := []byte{'x'}
y := []byte{'y'}
tmp := make([]byte, 72)
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
xBytes := Repeat(x, startLen)
buf := NewBuffer(xBytes)
// If we read, this affects buf.off, which is good to test.
readBytes, _ := buf.Read(tmp)
yBytes := Repeat(y, growLen)
allocs := testing.AllocsPerRun(100, func() {
buf.Grow(growLen)
buf.Write(yBytes)
})
// Check no allocation occurs in write, as long as we're single-threaded.
if allocs != 0 {
t.Errorf("allocation occurred during write")
}
// Check that buffer has correct data.
if !Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
t.Errorf("bad initial data at %d %d", startLen, growLen)
}
if !Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
t.Errorf("bad written data at %d %d", startLen, growLen)
}
}
}
}
func TestGrowOverflow(t *testing.T) {
defer func() {
if err := recover(); err != ErrTooLarge {
t.Errorf("after too-large Grow, recover() = %v; want %v", err, ErrTooLarge)
}
}()
buf := NewBuffer(make([]byte, 1))
const maxInt = int(^uint(0) >> 1)
buf.Grow(maxInt)
}
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
slice := make([]byte, 0)
n, err := b.Read(slice)
if err != nil {
t.Errorf("read error: %v", err)
}
if n != 0 {
t.Errorf("wrong count; got %d want 0", n)
}
}
func TestUnreadByte(t *testing.T) {
b := new(Buffer)
// check at EOF
if err := b.UnreadByte(); err == nil {
t.Fatal("UnreadByte at EOF: got no error")
}
if _, err := b.ReadByte(); err == nil {
t.Fatal("ReadByte at EOF: got no error")
}
if err := b.UnreadByte(); err == nil {
t.Fatal("UnreadByte after ReadByte at EOF: got no error")
}
// check not at EOF
b.WriteString("abcdefghijklmnopqrstuvwxyz")
// after unsuccessful read
if n, err := b.Read(nil); n != 0 || err != nil {
t.Fatalf("Read(nil) = %d,%v; want 0,nil", n, err)
}
if err := b.UnreadByte(); err == nil {
t.Fatal("UnreadByte after Read(nil): got no error")
}
// after successful read
if _, err := b.ReadBytes('m'); err != nil {
t.Fatalf("ReadBytes: %v", err)
}
if err := b.UnreadByte(); err != nil {
t.Fatalf("UnreadByte: %v", err)
}
c, err := b.ReadByte()
if err != nil {
t.Fatalf("ReadByte: %v", err)
}
if c != 'm' {
t.Errorf("ReadByte = %q; want %q", c, 'm')
}
}
// Tests that we occasionally compact. Issue 5154.
func TestBufferGrowth(t *testing.T) {
var b Buffer
buf := make([]byte, 1024)
b.Write(buf[0:1])
var cap0 int
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
if i == 0 {
cap0 = b.Cap()
}
}
cap1 := b.Cap()
// (*Buffer).grow allows for 2x capacity slop before sliding,
// so set our error threshold at 3x.
if cap1 > cap0*3 {
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
}
}
func BenchmarkWriteByte(b *testing.B) {
const n = 4 << 10
b.SetBytes(n)
buf := NewBuffer(make([]byte, n))
for i := 0; i < b.N; i++ {
buf.Reset()
for i := 0; i < n; i++ {
buf.WriteByte('x')
}
}
}
func BenchmarkWriteRune(b *testing.B) {
const n = 4 << 10
const r = '☺'
b.SetBytes(int64(n * utf8.RuneLen(r)))
buf := NewBuffer(make([]byte, n*utf8.UTFMax))
for i := 0; i < b.N; i++ {
buf.Reset()
for i := 0; i < n; i++ {
buf.WriteRune(r)
}
}
}
// From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf[0:1])
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
}
}
}
// Check that we don't compact too often. From Issue 5154.
func BenchmarkBufferFullSmallReads(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf)
for b.Len()+20 < b.Cap() {
b.Write(buf[:10])
}
for i := 0; i < 5<<10; i++ {
b.Read(buf[:1])
b.Write(buf[:1])
}
}
}
func BenchmarkBufferWriteBlock(b *testing.B) {
block := make([]byte, 1024)
for _, n := range []int{1 << 12, 1 << 16, 1 << 20} {
b.Run(fmt.Sprintf("N%d", n), func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
var bb Buffer
for bb.Len() < n {
bb.Write(block)
}
}
})
}
}
func BenchmarkBufferAppendNoCopy(b *testing.B) {
var bb Buffer
bb.Grow(16 << 20)
b.SetBytes(int64(bb.Available()))
b.ReportAllocs()
for i := 0; i < b.N; i++ {
bb.Reset()
b := bb.AvailableBuffer()
b = b[:cap(b)] // use max capacity to simulate a large append operation
bb.Write(b) // should be nearly infinitely fast
}
}
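
TestWriteAppend and TestNewBufferShallow above use testing.AllocsPerRun to assert that a hot path allocates nothing. A small sketch of the same idiom follows; the test name is hypothetical and it is not part of this commit.

package mypkg_test

import (
    "bytes"
    "strconv"
    "testing"
)

func TestAppendLoopDoesNotAllocate(t *testing.T) {
    var buf bytes.Buffer
    buf.Grow(1 << 12) // large enough that the loop never grows the buffer

    allocs := testing.AllocsPerRun(100, func() {
        buf.Reset()
        for i := 0; i < 100; i++ {
            b := buf.AvailableBuffer()
            b = strconv.AppendInt(b, int64(i), 10)
            buf.Write(b)
        }
    })
    if allocs != 0 {
        t.Errorf("got %v allocations per run, want 0", allocs)
    }
}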

src/bytes/bytes.go (new file, 1384 lines; diff suppressed because it is too large)

(next: a new 21-line test file whose name is not shown in this view)

@@ -0,0 +1,21 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js && wasm
package bytes_test
import (
"bytes"
"testing"
)
func TestIssue65571(t *testing.T) {
b := make([]byte, 1<<31+1)
b[1<<31] = 1
i := bytes.IndexByte(b, 1)
if i != 1<<31 {
t.Errorf("IndexByte(b, 1) = %d; want %d", i, 1<<31)
}
}

src/bytes/bytes_test.go (new file, 2280 lines; diff suppressed because it is too large)

src/bytes/compare_test.go (new file)

@@ -0,0 +1,296 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
. "bytes"
"fmt"
"testing"
)
var compareTests = []struct {
a, b []byte
i int
}{
{[]byte(""), []byte(""), 0},
{[]byte("a"), []byte(""), 1},
{[]byte(""), []byte("a"), -1},
{[]byte("abc"), []byte("abc"), 0},
{[]byte("abd"), []byte("abc"), 1},
{[]byte("abc"), []byte("abd"), -1},
{[]byte("ab"), []byte("abc"), -1},
{[]byte("abc"), []byte("ab"), 1},
{[]byte("x"), []byte("ab"), 1},
{[]byte("ab"), []byte("x"), -1},
{[]byte("x"), []byte("a"), 1},
{[]byte("b"), []byte("x"), -1},
// test runtime·memeq's chunked implementation
{[]byte("abcdefgh"), []byte("abcdefgh"), 0},
{[]byte("abcdefghi"), []byte("abcdefghi"), 0},
{[]byte("abcdefghi"), []byte("abcdefghj"), -1},
{[]byte("abcdefghj"), []byte("abcdefghi"), 1},
// nil tests
{nil, nil, 0},
{[]byte(""), nil, 0},
{nil, []byte(""), 0},
{[]byte("a"), nil, 1},
{nil, []byte("a"), -1},
}
func TestCompare(t *testing.T) {
for _, tt := range compareTests {
numShifts := 16
buffer := make([]byte, len(tt.b)+numShifts)
// vary the input alignment of tt.b
for offset := 0; offset <= numShifts; offset++ {
shiftedB := buffer[offset : len(tt.b)+offset]
copy(shiftedB, tt.b)
cmp := Compare(tt.a, shiftedB)
if cmp != tt.i {
t.Errorf(`Compare(%q, %q), offset %d = %v; want %v`, tt.a, tt.b, offset, cmp, tt.i)
}
}
}
}
func TestCompareIdenticalSlice(t *testing.T) {
var b = []byte("Hello Gophers!")
if Compare(b, b) != 0 {
t.Error("b != b")
}
if Compare(b, b[:1]) != 1 {
t.Error("b > b[:1] failed")
}
}
func TestCompareBytes(t *testing.T) {
lengths := make([]int, 0) // lengths to test in ascending order
for i := 0; i <= 128; i++ {
lengths = append(lengths, i)
}
lengths = append(lengths, 256, 512, 1024, 1333, 4095, 4096, 4097)
if !testing.Short() {
lengths = append(lengths, 65535, 65536, 65537, 99999)
}
n := lengths[len(lengths)-1]
a := make([]byte, n+1)
b := make([]byte, n+1)
for _, len := range lengths {
// randomish but deterministic data. No 0 or 255.
for i := 0; i < len; i++ {
a[i] = byte(1 + 31*i%254)
b[i] = byte(1 + 31*i%254)
}
// data past the end is different
for i := len; i <= n; i++ {
a[i] = 8
b[i] = 9
}
cmp := Compare(a[:len], b[:len])
if cmp != 0 {
t.Errorf(`CompareIdentical(%d) = %d`, len, cmp)
}
if len > 0 {
cmp = Compare(a[:len-1], b[:len])
if cmp != -1 {
t.Errorf(`CompareAshorter(%d) = %d`, len, cmp)
}
cmp = Compare(a[:len], b[:len-1])
if cmp != 1 {
t.Errorf(`CompareBshorter(%d) = %d`, len, cmp)
}
}
for k := 0; k < len; k++ {
b[k] = a[k] - 1
cmp = Compare(a[:len], b[:len])
if cmp != 1 {
t.Errorf(`CompareAbigger(%d,%d) = %d`, len, k, cmp)
}
b[k] = a[k] + 1
cmp = Compare(a[:len], b[:len])
if cmp != -1 {
t.Errorf(`CompareBbigger(%d,%d) = %d`, len, k, cmp)
}
b[k] = a[k]
}
}
}
func TestEndianBaseCompare(t *testing.T) {
// This test compares byte slices that are almost identical, except one
// difference that for some j, a[j]>b[j] and a[j+1]<b[j+1]. If the implementation
// compares large chunks with wrong endianness, it gets wrong result.
// no vector register is larger than 512 bytes for now
const maxLength = 512
a := make([]byte, maxLength)
b := make([]byte, maxLength)
// randomish but deterministic data. No 0 or 255.
for i := 0; i < maxLength; i++ {
a[i] = byte(1 + 31*i%254)
b[i] = byte(1 + 31*i%254)
}
for i := 2; i <= maxLength; i <<= 1 {
for j := 0; j < i-1; j++ {
a[j] = b[j] - 1
a[j+1] = b[j+1] + 1
cmp := Compare(a[:i], b[:i])
if cmp != -1 {
t.Errorf(`CompareBbigger(%d,%d) = %d`, i, j, cmp)
}
a[j] = b[j] + 1
a[j+1] = b[j+1] - 1
cmp = Compare(a[:i], b[:i])
if cmp != 1 {
t.Errorf(`CompareAbigger(%d,%d) = %d`, i, j, cmp)
}
a[j] = b[j]
a[j+1] = b[j+1]
}
}
}
func BenchmarkCompareBytesEqual(b *testing.B) {
b1 := []byte("Hello Gophers!")
b2 := []byte("Hello Gophers!")
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 0 {
b.Fatal("b1 != b2")
}
}
}
func BenchmarkCompareBytesToNil(b *testing.B) {
b1 := []byte("Hello Gophers!")
var b2 []byte
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 1 {
b.Fatal("b1 > b2 failed")
}
}
}
func BenchmarkCompareBytesEmpty(b *testing.B) {
b1 := []byte("")
b2 := b1
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 0 {
b.Fatal("b1 != b2")
}
}
}
func BenchmarkCompareBytesIdentical(b *testing.B) {
b1 := []byte("Hello Gophers!")
b2 := b1
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 0 {
b.Fatal("b1 != b2")
}
}
}
func BenchmarkCompareBytesSameLength(b *testing.B) {
b1 := []byte("Hello Gophers!")
b2 := []byte("Hello, Gophers")
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != -1 {
b.Fatal("b1 < b2 failed")
}
}
}
func BenchmarkCompareBytesDifferentLength(b *testing.B) {
b1 := []byte("Hello Gophers!")
b2 := []byte("Hello, Gophers!")
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != -1 {
b.Fatal("b1 < b2 failed")
}
}
}
func benchmarkCompareBytesBigUnaligned(b *testing.B, offset int) {
b.StopTimer()
b1 := make([]byte, 0, 1<<20)
for len(b1) < 1<<20 {
b1 = append(b1, "Hello Gophers!"...)
}
b2 := append([]byte("12345678")[:offset], b1...)
b.StartTimer()
for j := 0; j < b.N; j++ {
if Compare(b1, b2[offset:]) != 0 {
b.Fatal("b1 != b2")
}
}
b.SetBytes(int64(len(b1)))
}
func BenchmarkCompareBytesBigUnaligned(b *testing.B) {
for i := 1; i < 8; i++ {
b.Run(fmt.Sprintf("offset=%d", i), func(b *testing.B) {
benchmarkCompareBytesBigUnaligned(b, i)
})
}
}
func benchmarkCompareBytesBigBothUnaligned(b *testing.B, offset int) {
b.StopTimer()
pattern := []byte("Hello Gophers!")
b1 := make([]byte, 0, 1<<20+len(pattern))
for len(b1) < 1<<20 {
b1 = append(b1, pattern...)
}
b2 := make([]byte, len(b1))
copy(b2, b1)
b.StartTimer()
for j := 0; j < b.N; j++ {
if Compare(b1[offset:], b2[offset:]) != 0 {
b.Fatal("b1 != b2")
}
}
b.SetBytes(int64(len(b1[offset:])))
}
func BenchmarkCompareBytesBigBothUnaligned(b *testing.B) {
for i := 0; i < 8; i++ {
b.Run(fmt.Sprintf("offset=%d", i), func(b *testing.B) {
benchmarkCompareBytesBigBothUnaligned(b, i)
})
}
}
func BenchmarkCompareBytesBig(b *testing.B) {
b.StopTimer()
b1 := make([]byte, 0, 1<<20)
for len(b1) < 1<<20 {
b1 = append(b1, "Hello Gophers!"...)
}
b2 := append([]byte{}, b1...)
b.StartTimer()
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 0 {
b.Fatal("b1 != b2")
}
}
b.SetBytes(int64(len(b1)))
}
func BenchmarkCompareBytesBigIdentical(b *testing.B) {
b.StopTimer()
b1 := make([]byte, 0, 1<<20)
for len(b1) < 1<<20 {
b1 = append(b1, "Hello Gophers!"...)
}
b2 := b1
b.StartTimer()
for i := 0; i < b.N; i++ {
if Compare(b1, b2) != 0 {
b.Fatal("b1 != b2")
}
}
b.SetBytes(int64(len(b1)))
}
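
compare_test.go pins down the three-way contract of Compare (-1, 0, or +1) across many lengths and alignments. That contract is exactly the comparator shape the generic slices helpers expect, so Compare can be passed to them directly; a short sketch, not part of this commit and assuming the slices package from Go 1.21+:

package main

import (
    "bytes"
    "fmt"
    "slices"
)

func main() {
    words := [][]byte{[]byte("gopher"), []byte("go"), []byte("golang")}

    // bytes.Compare already has the func(a, b []byte) int shape that
    // slices.SortFunc and slices.BinarySearchFunc expect.
    slices.SortFunc(words, bytes.Compare)
    for _, w := range words {
        fmt.Printf("%s\n", w)
    }

    _, found := slices.BinarySearchFunc(words, []byte("golang"), bytes.Compare)
    fmt.Println(found)
    // Prints:
    // go
    // golang
    // gopher
    // true
}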

src/bytes/example_test.go (new file)

@@ -0,0 +1,630 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"os"
"slices"
"strconv"
"unicode"
)
func ExampleBuffer() {
var b bytes.Buffer // A Buffer needs no initialization.
b.Write([]byte("Hello "))
fmt.Fprintf(&b, "world!")
b.WriteTo(os.Stdout)
// Output: Hello world!
}
func ExampleBuffer_reader() {
// A Buffer can turn a string or a []byte into an io.Reader.
buf := bytes.NewBufferString("R29waGVycyBydWxlIQ==")
dec := base64.NewDecoder(base64.StdEncoding, buf)
io.Copy(os.Stdout, dec)
// Output: Gophers rule!
}
func ExampleBuffer_Bytes() {
buf := bytes.Buffer{}
buf.Write([]byte{'h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd'})
os.Stdout.Write(buf.Bytes())
// Output: hello world
}
func ExampleBuffer_AvailableBuffer() {
var buf bytes.Buffer
for i := 0; i < 4; i++ {
b := buf.AvailableBuffer()
b = strconv.AppendInt(b, int64(i), 10)
b = append(b, ' ')
buf.Write(b)
}
os.Stdout.Write(buf.Bytes())
// Output: 0 1 2 3
}
func ExampleBuffer_Cap() {
buf1 := bytes.NewBuffer(make([]byte, 10))
buf2 := bytes.NewBuffer(make([]byte, 0, 10))
fmt.Println(buf1.Cap())
fmt.Println(buf2.Cap())
// Output:
// 10
// 10
}
func ExampleBuffer_Grow() {
var b bytes.Buffer
b.Grow(64)
bb := b.Bytes()
b.Write([]byte("64 bytes or fewer"))
fmt.Printf("%q", bb[:b.Len()])
// Output: "64 bytes or fewer"
}
func ExampleBuffer_Len() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
fmt.Printf("%d", b.Len())
// Output: 5
}
func ExampleBuffer_Next() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
fmt.Printf("%s\n", b.Next(2))
fmt.Printf("%s\n", b.Next(2))
fmt.Printf("%s", b.Next(2))
// Output:
// ab
// cd
// e
}
func ExampleBuffer_Read() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
rdbuf := make([]byte, 1)
n, err := b.Read(rdbuf)
if err != nil {
panic(err)
}
fmt.Println(n)
fmt.Println(b.String())
fmt.Println(string(rdbuf))
// Output:
// 1
// bcde
// a
}
func ExampleBuffer_ReadByte() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
c, err := b.ReadByte()
if err != nil {
panic(err)
}
fmt.Println(c)
fmt.Println(b.String())
// Output:
// 97
// bcde
}
func ExampleClone() {
b := []byte("abc")
clone := bytes.Clone(b)
fmt.Printf("%s\n", clone)
clone[0] = 'd'
fmt.Printf("%s\n", b)
fmt.Printf("%s\n", clone)
// Output:
// abc
// abc
// dbc
}
func ExampleCompare() {
// Interpret Compare's result by comparing it to zero.
var a, b []byte
if bytes.Compare(a, b) < 0 {
// a less b
}
if bytes.Compare(a, b) <= 0 {
// a less or equal b
}
if bytes.Compare(a, b) > 0 {
// a greater b
}
if bytes.Compare(a, b) >= 0 {
// a greater or equal b
}
// Prefer Equal to Compare for equality comparisons.
if bytes.Equal(a, b) {
// a equal b
}
if !bytes.Equal(a, b) {
// a not equal b
}
}
func ExampleCompare_search() {
// Binary search to find a matching byte slice.
var needle []byte
var haystack [][]byte // Assume sorted
_, found := slices.BinarySearchFunc(haystack, needle, bytes.Compare)
if found {
// Found it!
}
}
func ExampleContains() {
fmt.Println(bytes.Contains([]byte("seafood"), []byte("foo")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("bar")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("")))
fmt.Println(bytes.Contains([]byte(""), []byte("")))
// Output:
// true
// false
// true
// true
}
func ExampleContainsAny() {
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "fÄo!"))
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "去是伟大的."))
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), ""))
fmt.Println(bytes.ContainsAny([]byte(""), ""))
// Output:
// true
// true
// false
// false
}
func ExampleContainsRune() {
fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'f'))
fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'ö'))
fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '大'))
fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '!'))
fmt.Println(bytes.ContainsRune([]byte(""), '@'))
// Output:
// true
// false
// true
// true
// false
}
func ExampleContainsFunc() {
f := func(r rune) bool {
return r >= 'a' && r <= 'z'
}
fmt.Println(bytes.ContainsFunc([]byte("HELLO"), f))
fmt.Println(bytes.ContainsFunc([]byte("World"), f))
// Output:
// false
// true
}
func ExampleCount() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
// Output:
// 3
// 5
}
func ExampleCut() {
show := func(s, sep string) {
before, after, found := bytes.Cut([]byte(s), []byte(sep))
fmt.Printf("Cut(%q, %q) = %q, %q, %v\n", s, sep, before, after, found)
}
show("Gopher", "Go")
show("Gopher", "ph")
show("Gopher", "er")
show("Gopher", "Badger")
// Output:
// Cut("Gopher", "Go") = "", "pher", true
// Cut("Gopher", "ph") = "Go", "er", true
// Cut("Gopher", "er") = "Goph", "", true
// Cut("Gopher", "Badger") = "Gopher", "", false
}
func ExampleCutPrefix() {
show := func(s, sep string) {
after, found := bytes.CutPrefix([]byte(s), []byte(sep))
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, sep, after, found)
}
show("Gopher", "Go")
show("Gopher", "ph")
// Output:
// CutPrefix("Gopher", "Go") = "pher", true
// CutPrefix("Gopher", "ph") = "Gopher", false
}
func ExampleCutSuffix() {
show := func(s, sep string) {
before, found := bytes.CutSuffix([]byte(s), []byte(sep))
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, sep, before, found)
}
show("Gopher", "Go")
show("Gopher", "er")
// Output:
// CutSuffix("Gopher", "Go") = "Gopher", false
// CutSuffix("Gopher", "er") = "Goph", true
}
func ExampleEqual() {
fmt.Println(bytes.Equal([]byte("Go"), []byte("Go")))
fmt.Println(bytes.Equal([]byte("Go"), []byte("C++")))
// Output:
// true
// false
}
func ExampleEqualFold() {
fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go")))
// Output: true
}
func ExampleFields() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte(" foo bar baz ")))
// Output: Fields are: ["foo" "bar" "baz"]
}
func ExampleFieldsFunc() {
f := func(c rune) bool {
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
// Output: Fields are: ["foo1" "bar2" "baz3"]
}
func ExampleHasPrefix() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("")))
// Output:
// true
// false
// true
}
func ExampleHasSuffix() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
// Output:
// true
// false
// false
// true
}
func ExampleIndex() {
fmt.Println(bytes.Index([]byte("chicken"), []byte("ken")))
fmt.Println(bytes.Index([]byte("chicken"), []byte("dmr")))
// Output:
// 4
// -1
}
func ExampleIndexByte() {
fmt.Println(bytes.IndexByte([]byte("chicken"), byte('k')))
fmt.Println(bytes.IndexByte([]byte("chicken"), byte('g')))
// Output:
// 4
// -1
}
func ExampleIndexFunc() {
f := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes.IndexFunc([]byte("Hello, 世界"), f))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f))
// Output:
// 7
// -1
}
func ExampleIndexAny() {
fmt.Println(bytes.IndexAny([]byte("chicken"), "aeiouy"))
fmt.Println(bytes.IndexAny([]byte("crwth"), "aeiouy"))
// Output:
// 2
// -1
}
func ExampleIndexRune() {
fmt.Println(bytes.IndexRune([]byte("chicken"), 'k'))
fmt.Println(bytes.IndexRune([]byte("chicken"), 'd'))
// Output:
// 4
// -1
}
func ExampleJoin() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
// Output: foo, bar, baz
}
func ExampleLastIndex() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
// Output:
// 0
// 3
// -1
}
func ExampleLastIndexAny() {
fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "MüQp"))
fmt.Println(bytes.LastIndexAny([]byte("go 地鼠"), "地大"))
fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "z,!."))
// Output:
// 5
// 3
// -1
}
func ExampleLastIndexByte() {
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('g')))
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('r')))
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('z')))
// Output:
// 3
// 8
// -1
}
func ExampleLastIndexFunc() {
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsLetter))
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsPunct))
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsNumber))
// Output:
// 8
// 9
// -1
}
func ExampleMap() {
rot13 := func(r rune) rune {
switch {
case r >= 'A' && r <= 'Z':
return 'A' + (r-'A'+13)%26
case r >= 'a' && r <= 'z':
return 'a' + (r-'a'+13)%26
}
return r
}
fmt.Printf("%s\n", bytes.Map(rot13, []byte("'Twas brillig and the slithy gopher...")))
// Output:
// 'Gjnf oevyyvt naq gur fyvgul tbcure...
}
func ExampleReader_Len() {
fmt.Println(bytes.NewReader([]byte("Hi!")).Len())
fmt.Println(bytes.NewReader([]byte("こんにちは!")).Len())
// Output:
// 3
// 16
}
func ExampleRepeat() {
fmt.Printf("ba%s", bytes.Repeat([]byte("na"), 2))
// Output: banana
}
func ExampleReplace() {
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2))
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("oink"), []byte("moo"), -1))
// Output:
// oinky oinky oink
// moo moo moo
}
func ExampleReplaceAll() {
fmt.Printf("%s\n", bytes.ReplaceAll([]byte("oink oink oink"), []byte("oink"), []byte("moo")))
// Output:
// moo moo moo
}
func ExampleRunes() {
rs := bytes.Runes([]byte("go gopher"))
for _, r := range rs {
fmt.Printf("%#U\n", r)
}
// Output:
// U+0067 'g'
// U+006F 'o'
// U+0020 ' '
// U+0067 'g'
// U+006F 'o'
// U+0070 'p'
// U+0068 'h'
// U+0065 'e'
// U+0072 'r'
}
func ExampleSplit() {
fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))
fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a ")))
fmt.Printf("%q\n", bytes.Split([]byte(" xyz "), []byte("")))
fmt.Printf("%q\n", bytes.Split([]byte(""), []byte("Bernardo O'Higgins")))
// Output:
// ["a" "b" "c"]
// ["" "man " "plan " "canal panama"]
// [" " "x" "y" "z" " "]
// [""]
}
func ExampleSplitN() {
fmt.Printf("%q\n", bytes.SplitN([]byte("a,b,c"), []byte(","), 2))
z := bytes.SplitN([]byte("a,b,c"), []byte(","), 0)
fmt.Printf("%q (nil = %v)\n", z, z == nil)
// Output:
// ["a" "b,c"]
// [] (nil = true)
}
func ExampleSplitAfter() {
fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(",")))
// Output: ["a," "b," "c"]
}
func ExampleSplitAfterN() {
fmt.Printf("%q\n", bytes.SplitAfterN([]byte("a,b,c"), []byte(","), 2))
// Output: ["a," "b,c"]
}
func ExampleTitle() {
fmt.Printf("%s", bytes.Title([]byte("her royal highness")))
// Output: Her Royal Highness
}
func ExampleToTitle() {
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
// Output:
// LOUD NOISES
// ХЛЕБ
}
func ExampleToTitleSpecial() {
str := []byte("ahoj vývojári golang")
totitle := bytes.ToTitleSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToTitle : " + string(totitle))
// Output:
// Original : ahoj vývojári golang
// ToTitle : AHOJ VÝVOJÁRİ GOLANG
}
func ExampleToValidUTF8() {
fmt.Printf("%s\n", bytes.ToValidUTF8([]byte("abc"), []byte("\uFFFD")))
fmt.Printf("%s\n", bytes.ToValidUTF8([]byte("a\xffb\xC0\xAFc\xff"), []byte("")))
fmt.Printf("%s\n", bytes.ToValidUTF8([]byte("\xed\xa0\x80"), []byte("abc")))
// Output:
// abc
// abc
// abc
}
func ExampleTrim() {
fmt.Printf("[%q]", bytes.Trim([]byte(" !!! Achtung! Achtung! !!! "), "! "))
// Output: ["Achtung! Achtung"]
}
func ExampleTrimFunc() {
fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimFunc([]byte("\"go-gopher!\""), unicode.IsLetter)))
fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
// Output:
// -gopher!
// "go-gopher!"
// go-gopher
// go-gopher!
}
func ExampleTrimLeft() {
fmt.Print(string(bytes.TrimLeft([]byte("453gopher8257"), "0123456789")))
// Output:
// gopher8257
}
func ExampleTrimLeftFunc() {
fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimLeftFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
// Output:
// -gopher
// go-gopher!
// go-gopher!567
}
func ExampleTrimPrefix() {
var b = []byte("Goodbye,, world!")
b = bytes.TrimPrefix(b, []byte("Goodbye,"))
b = bytes.TrimPrefix(b, []byte("See ya,"))
fmt.Printf("Hello%s", b)
// Output: Hello, world!
}
func ExampleTrimSpace() {
fmt.Printf("%s", bytes.TrimSpace([]byte(" \t\n a lone gopher \n\t\r\n")))
// Output: a lone gopher
}
func ExampleTrimSuffix() {
var b = []byte("Hello, goodbye, etc!")
b = bytes.TrimSuffix(b, []byte("goodbye, etc!"))
b = bytes.TrimSuffix(b, []byte("gopher"))
b = append(b, bytes.TrimSuffix([]byte("world!"), []byte("x!"))...)
os.Stdout.Write(b)
// Output: Hello, world!
}
func ExampleTrimRight() {
fmt.Print(string(bytes.TrimRight([]byte("453gopher8257"), "0123456789")))
// Output:
// 453gopher
}
func ExampleTrimRightFunc() {
fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimRightFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
// Output:
// go-
// go-gopher
// 1234go-gopher!
}
func ExampleToLower() {
fmt.Printf("%s", bytes.ToLower([]byte("Gopher")))
// Output: gopher
}
func ExampleToLowerSpecial() {
str := []byte("AHOJ VÝVOJÁRİ GOLANG")
tolower := bytes.ToLowerSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToLower : " + string(tolower))
// Output:
// Original : AHOJ VÝVOJÁRİ GOLANG
// ToLower : ahoj vývojári golang
}
func ExampleToUpper() {
fmt.Printf("%s", bytes.ToUpper([]byte("Gopher")))
// Output: GOPHER
}
func ExampleToUpperSpecial() {
str := []byte("ahoj vývojári golang")
toupper := bytes.ToUpperSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToUpper : " + string(toupper))
// Output:
// Original : ahoj vývojári golang
// ToUpper : AHOJ VÝVOJÁRİ GOLANG
}

8
src/bytes/export_test.go Normal file
View File

@@ -0,0 +1,8 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes
// Export func for testing
var IndexBytePortable = indexBytePortable

159
src/bytes/reader.go Normal file
View File

@@ -0,0 +1,159 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes
import (
"errors"
"io"
"unicode/utf8"
)
// A Reader implements the [io.Reader], [io.ReaderAt], [io.WriterTo], [io.Seeker],
// [io.ByteScanner], and [io.RuneScanner] interfaces by reading from
// a byte slice.
// Unlike a [Buffer], a Reader is read-only and supports seeking.
// The zero value for Reader operates like a Reader of an empty slice.
type Reader struct {
s []byte
i int64 // current reading index
prevRune int // index of previous rune; or < 0
}
// Len returns the number of bytes of the unread portion of the
// slice.
func (r *Reader) Len() int {
if r.i >= int64(len(r.s)) {
return 0
}
return int(int64(len(r.s)) - r.i)
}
// Size returns the original length of the underlying byte slice.
// Size is the number of bytes available for reading via [Reader.ReadAt].
// The result is unaffected by any method calls except [Reader.Reset].
func (r *Reader) Size() int64 { return int64(len(r.s)) }
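// Illustrative sketch (editor-added, not part of the original file): how Len
// and Size differ after a partial read, as seen from a client package:
//
//	r := bytes.NewReader([]byte("hello"))
//	buf := make([]byte, 2)
//	r.Read(buf)   // consumes "he"
//	_ = r.Len()   // 3: unread bytes remaining
//	_ = r.Size()  // 5: length of the underlying slice, unchanged by Read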
// Read implements the [io.Reader] interface.
func (r *Reader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
}
r.prevRune = -1
n = copy(b, r.s[r.i:])
r.i += int64(n)
return
}
// ReadAt implements the [io.ReaderAt] interface.
func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) {
// cannot modify state - see io.ReaderAt
if off < 0 {
return 0, errors.New("bytes.Reader.ReadAt: negative offset")
}
if off >= int64(len(r.s)) {
return 0, io.EOF
}
n = copy(b, r.s[off:])
if n < len(b) {
err = io.EOF
}
return
}
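// Illustrative sketch (editor-added, not part of the original file): ReadAt
// reads at an absolute offset and does not move the position used by Read:
//
//	r := bytes.NewReader([]byte("0123456789"))
//	buf := make([]byte, 3)
//	r.ReadAt(buf, 4) // buf now holds "456"
//	r.Read(buf)      // still reads from the start: "012"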
// ReadByte implements the [io.ByteReader] interface.
func (r *Reader) ReadByte() (byte, error) {
r.prevRune = -1
if r.i >= int64(len(r.s)) {
return 0, io.EOF
}
b := r.s[r.i]
r.i++
return b, nil
}
// UnreadByte complements [Reader.ReadByte] in implementing the [io.ByteScanner] interface.
func (r *Reader) UnreadByte() error {
if r.i <= 0 {
return errors.New("bytes.Reader.UnreadByte: at beginning of slice")
}
r.prevRune = -1
r.i--
return nil
}
// ReadRune implements the [io.RuneReader] interface.
func (r *Reader) ReadRune() (ch rune, size int, err error) {
if r.i >= int64(len(r.s)) {
r.prevRune = -1
return 0, 0, io.EOF
}
r.prevRune = int(r.i)
if c := r.s[r.i]; c < utf8.RuneSelf {
r.i++
return rune(c), 1, nil
}
ch, size = utf8.DecodeRune(r.s[r.i:])
r.i += int64(size)
return
}
// UnreadRune complements [Reader.ReadRune] in implementing the [io.RuneScanner] interface.
func (r *Reader) UnreadRune() error {
if r.i <= 0 {
return errors.New("bytes.Reader.UnreadRune: at beginning of slice")
}
if r.prevRune < 0 {
return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune")
}
r.i = int64(r.prevRune)
r.prevRune = -1
return nil
}
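// Illustrative sketch (editor-added, not part of the original file):
// UnreadRune only succeeds when the immediately preceding operation was a
// successful ReadRune:
//
//	r := bytes.NewReader([]byte("héllo"))
//	r.ReadRune()          // reads 'h'
//	r.UnreadRune()        // ok: rewinds to before 'h'
//	r.ReadByte()          // reads 'h' again
//	err := r.UnreadRune() // error: previous operation was not ReadRune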
// Seek implements the [io.Seeker] interface.
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
r.prevRune = -1
var abs int64
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs = r.i + offset
case io.SeekEnd:
abs = int64(len(r.s)) + offset
default:
return 0, errors.New("bytes.Reader.Seek: invalid whence")
}
if abs < 0 {
return 0, errors.New("bytes.Reader.Seek: negative position")
}
r.i = abs
return abs, nil
}
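// Illustrative sketch (editor-added, not part of the original file): seeking
// relative to the end with a negative offset positions the reader over the
// final bytes:
//
//	r := bytes.NewReader([]byte("0123456789"))
//	r.Seek(-2, io.SeekEnd) // position 8
//	b, _ := r.ReadByte()   // '8'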
// WriteTo implements the [io.WriterTo] interface.
func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
r.prevRune = -1
if r.i >= int64(len(r.s)) {
return 0, nil
}
b := r.s[r.i:]
m, err := w.Write(b)
if m > len(b) {
panic("bytes.Reader.WriteTo: invalid Write count")
}
r.i += int64(m)
n = int64(m)
if m != len(b) && err == nil {
err = io.ErrShortWrite
}
return
}
// Reset resets the [Reader] to be reading from b.
func (r *Reader) Reset(b []byte) { *r = Reader{b, 0, -1} }
// NewReader returns a new [Reader] reading from b.
func NewReader(b []byte) *Reader { return &Reader{b, 0, -1} }
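// Illustrative sketch (editor-added, not part of the original file): typical
// use from a client package. Unlike a Buffer, a Reader is read-only and can
// be rewound with Seek:
//
//	r := bytes.NewReader([]byte("gopher"))
//	io.Copy(os.Stdout, r)   // prints "gopher"
//	r.Seek(0, io.SeekStart) // rewind
//	io.Copy(os.Stdout, r)   // prints "gopher" again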

319
src/bytes/reader_test.go Normal file
View File

@@ -0,0 +1,319 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bytes_test
import (
. "bytes"
"fmt"
"io"
"sync"
"testing"
)
func TestReader(t *testing.T) {
r := NewReader([]byte("0123456789"))
tests := []struct {
off int64
seek int
n int
want string
wantpos int64
readerr error
seekerr string
}{
{seek: io.SeekStart, off: 0, n: 20, want: "0123456789"},
{seek: io.SeekStart, off: 1, n: 1, want: "1"},
{seek: io.SeekCurrent, off: 1, wantpos: 3, n: 2, want: "34"},
{seek: io.SeekStart, off: -1, seekerr: "bytes.Reader.Seek: negative position"},
{seek: io.SeekStart, off: 1 << 33, wantpos: 1 << 33, readerr: io.EOF},
{seek: io.SeekCurrent, off: 1, wantpos: 1<<33 + 1, readerr: io.EOF},
{seek: io.SeekStart, n: 5, want: "01234"},
{seek: io.SeekCurrent, n: 5, want: "56789"},
{seek: io.SeekEnd, off: -1, n: 1, wantpos: 9, want: "9"},
}
for i, tt := range tests {
pos, err := r.Seek(tt.off, tt.seek)
if err == nil && tt.seekerr != "" {
t.Errorf("%d. want seek error %q", i, tt.seekerr)
continue
}
if err != nil && err.Error() != tt.seekerr {
t.Errorf("%d. seek error = %q; want %q", i, err.Error(), tt.seekerr)
continue
}
if tt.wantpos != 0 && tt.wantpos != pos {
t.Errorf("%d. pos = %d, want %d", i, pos, tt.wantpos)
}
buf := make([]byte, tt.n)
n, err := r.Read(buf)
if err != tt.readerr {
t.Errorf("%d. read = %v; want %v", i, err, tt.readerr)
continue
}
got := string(buf[:n])
if got != tt.want {
t.Errorf("%d. got %q; want %q", i, got, tt.want)
}
}
}
func TestReadAfterBigSeek(t *testing.T) {
r := NewReader([]byte("0123456789"))
if _, err := r.Seek(1<<31+5, io.SeekStart); err != nil {
t.Fatal(err)
}
if n, err := r.Read(make([]byte, 10)); n != 0 || err != io.EOF {
t.Errorf("Read = %d, %v; want 0, EOF", n, err)
}
}
func TestReaderAt(t *testing.T) {
r := NewReader([]byte("0123456789"))
tests := []struct {
off int64
n int
want string
wanterr any
}{
{0, 10, "0123456789", nil},
{1, 10, "123456789", io.EOF},
{1, 9, "123456789", nil},
{11, 10, "", io.EOF},
{0, 0, "", nil},
{-1, 0, "", "bytes.Reader.ReadAt: negative offset"},
}
for i, tt := range tests {
b := make([]byte, tt.n)
rn, err := r.ReadAt(b, tt.off)
got := string(b[:rn])
if got != tt.want {
t.Errorf("%d. got %q; want %q", i, got, tt.want)
}
if fmt.Sprintf("%v", err) != fmt.Sprintf("%v", tt.wanterr) {
t.Errorf("%d. got error = %v; want %v", i, err, tt.wanterr)
}
}
}
func TestReaderAtConcurrent(t *testing.T) {
// Test for the race detector, to verify ReadAt doesn't mutate
// any state.
r := NewReader([]byte("0123456789"))
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
var buf [1]byte
r.ReadAt(buf[:], int64(i))
}(i)
}
wg.Wait()
}
func TestEmptyReaderConcurrent(t *testing.T) {
// Test for the race detector, to verify a Read that doesn't yield any bytes
// is okay to use from multiple goroutines. This was our historic behavior.
// See golang.org/issue/7856
r := NewReader([]byte{})
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(2)
go func() {
defer wg.Done()
var buf [1]byte
r.Read(buf[:])
}()
go func() {
defer wg.Done()
r.Read(nil)
}()
}
wg.Wait()
}
func TestReaderWriteTo(t *testing.T) {
for i := 0; i < 30; i += 3 {
var l int
if i > 0 {
l = len(testString) / i
}
s := testString[:l]
r := NewReader(testBytes[:l])
var b Buffer
n, err := r.WriteTo(&b)
if expect := int64(len(s)); n != expect {
t.Errorf("got %v; want %v", n, expect)
}
if err != nil {
t.Errorf("for length %d: got error = %v; want nil", l, err)
}
if b.String() != s {
t.Errorf("got string %q; want %q", b.String(), s)
}
if r.Len() != 0 {
t.Errorf("reader contains %v bytes; want 0", r.Len())
}
}
}
func TestReaderLen(t *testing.T) {
const data = "hello world"
r := NewReader([]byte(data))
if got, want := r.Len(), 11; got != want {
t.Errorf("r.Len(): got %d, want %d", got, want)
}
if n, err := r.Read(make([]byte, 10)); err != nil || n != 10 {
t.Errorf("Read failed: read %d %v", n, err)
}
if got, want := r.Len(), 1; got != want {
t.Errorf("r.Len(): got %d, want %d", got, want)
}
if n, err := r.Read(make([]byte, 1)); err != nil || n != 1 {
t.Errorf("Read failed: read %d %v; want 1, nil", n, err)
}
if got, want := r.Len(), 0; got != want {
t.Errorf("r.Len(): got %d, want %d", got, want)
}
}
var UnreadRuneErrorTests = []struct {
name string
f func(*Reader)
}{
{"Read", func(r *Reader) { r.Read([]byte{0}) }},
{"ReadByte", func(r *Reader) { r.ReadByte() }},
{"UnreadRune", func(r *Reader) { r.UnreadRune() }},
{"Seek", func(r *Reader) { r.Seek(0, io.SeekCurrent) }},
{"WriteTo", func(r *Reader) { r.WriteTo(&Buffer{}) }},
}
func TestUnreadRuneError(t *testing.T) {
for _, tt := range UnreadRuneErrorTests {
reader := NewReader([]byte("0123456789"))
if _, _, err := reader.ReadRune(); err != nil {
// should not happen
t.Fatal(err)
}
tt.f(reader)
err := reader.UnreadRune()
if err == nil {
t.Errorf("Unreading after %s: expected error", tt.name)
}
}
}
func TestReaderDoubleUnreadRune(t *testing.T) {
buf := NewBuffer([]byte("groucho"))
if _, _, err := buf.ReadRune(); err != nil {
// should not happen
t.Fatal(err)
}
if err := buf.UnreadByte(); err != nil {
// should not happen
t.Fatal(err)
}
if err := buf.UnreadByte(); err == nil {
t.Fatal("UnreadByte: expected error, got nil")
}
}
// verify that copying from an empty reader always has the same results,
// regardless of the presence of a WriteTo method.
func TestReaderCopyNothing(t *testing.T) {
type nErr struct {
n int64
err error
}
type justReader struct {
io.Reader
}
type justWriter struct {
io.Writer
}
discard := justWriter{io.Discard} // hide ReadFrom
var with, withOut nErr
with.n, with.err = io.Copy(discard, NewReader(nil))
withOut.n, withOut.err = io.Copy(discard, justReader{NewReader(nil)})
if with != withOut {
t.Errorf("behavior differs: with = %#v; without: %#v", with, withOut)
}
}
// tests that Len is affected by reads, but Size is not.
func TestReaderLenSize(t *testing.T) {
r := NewReader([]byte("abc"))
io.CopyN(io.Discard, r, 1)
if r.Len() != 2 {
t.Errorf("Len = %d; want 2", r.Len())
}
if r.Size() != 3 {
t.Errorf("Size = %d; want 3", r.Size())
}
}
func TestReaderReset(t *testing.T) {
r := NewReader([]byte("世界"))
if _, _, err := r.ReadRune(); err != nil {
t.Errorf("ReadRune: unexpected error: %v", err)
}
const want = "abcdef"
r.Reset([]byte(want))
if err := r.UnreadRune(); err == nil {
t.Errorf("UnreadRune: expected error, got nil")
}
buf, err := io.ReadAll(r)
if err != nil {
t.Errorf("ReadAll: unexpected error: %v", err)
}
if got := string(buf); got != want {
t.Errorf("ReadAll: got %q, want %q", got, want)
}
}
func TestReaderZero(t *testing.T) {
if l := (&Reader{}).Len(); l != 0 {
t.Errorf("Len: got %d, want 0", l)
}
if n, err := (&Reader{}).Read(nil); n != 0 || err != io.EOF {
t.Errorf("Read: got %d, %v; want 0, io.EOF", n, err)
}
if n, err := (&Reader{}).ReadAt(nil, 11); n != 0 || err != io.EOF {
t.Errorf("ReadAt: got %d, %v; want 0, io.EOF", n, err)
}
if b, err := (&Reader{}).ReadByte(); b != 0 || err != io.EOF {
t.Errorf("ReadByte: got %d, %v; want 0, io.EOF", b, err)
}
if ch, size, err := (&Reader{}).ReadRune(); ch != 0 || size != 0 || err != io.EOF {
t.Errorf("ReadRune: got %d, %d, %v; want 0, 0, io.EOF", ch, size, err)
}
if offset, err := (&Reader{}).Seek(11, io.SeekStart); offset != 11 || err != nil {
t.Errorf("Seek: got %d, %v; want 11, nil", offset, err)
}
if s := (&Reader{}).Size(); s != 0 {
t.Errorf("Size: got %d, want 0", s)
}
if (&Reader{}).UnreadByte() == nil {
t.Errorf("UnreadByte: got nil, want error")
}
if (&Reader{}).UnreadRune() == nil {
t.Errorf("UnreadRune: got nil, want error")
}
if n, err := (&Reader{}).WriteTo(io.Discard); n != 0 || err != nil {
t.Errorf("WriteTo: got %d, %v; want 0, nil", n, err)
}
}
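// Editor-added illustrative example, not part of the original test suite; it
// assumes only the imports already present in this file (the dot-imported
// bytes package, fmt, and io) and shows WriteTo draining the reader and Reset
// reinitializing it.
func ExampleReader_illustrative() {
	r := NewReader([]byte("hello"))
	var buf Buffer
	r.WriteTo(&buf)           // drains the remaining bytes into buf
	fmt.Println(buf.String()) // hello
	fmt.Println(r.Len())      // nothing left to read
	r.Reset([]byte("world"))
	fmt.Println(r.Len()) // full length of the new slice
	// Output:
	// hello
	// 0
	// 5
}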