Involved Source Files
buffer.go
Package bytes implements functions for the manipulation of byte slices.
It is analogous to the facilities of the strings package.
reader.go
Code Examples
package main
import (
"bytes"
"fmt"
"os"
)
func main() {
var b bytes.Buffer // A Buffer needs no initialization.
b.Write([]byte("Hello "))
fmt.Fprintf(&b, "world!")
b.WriteTo(os.Stdout)
}
package main
import (
"bytes"
"os"
)
func main() {
buf := bytes.Buffer{}
buf.Write([]byte{'h', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd'})
os.Stdout.Write(buf.Bytes())
}
package main
import (
"bytes"
"fmt"
)
func main() {
var b bytes.Buffer
b.Grow(64)
bb := b.Bytes()
b.Write([]byte("64 bytes or fewer"))
fmt.Printf("%q", bb[:b.Len()])
}
package main
import (
"bytes"
"fmt"
)
func main() {
var b bytes.Buffer
b.Grow(64)
b.Write([]byte("abcde"))
fmt.Printf("%d", b.Len())
}
package main
import (
"bytes"
"encoding/base64"
"io"
"os"
)
func main() {
// A Buffer can turn a string or a []byte into an io.Reader.
buf := bytes.NewBufferString("R29waGVycyBydWxlIQ==")
dec := base64.NewDecoder(base64.StdEncoding, buf)
io.Copy(os.Stdout, dec)
}
package main
import (
"bytes"
)
func main() {
// Interpret Compare's result by comparing it to zero.
var a, b []byte
if bytes.Compare(a, b) < 0 {
// a less b
}
if bytes.Compare(a, b) <= 0 {
// a less or equal b
}
if bytes.Compare(a, b) > 0 {
// a greater b
}
if bytes.Compare(a, b) >= 0 {
// a greater or equal b
}
// Prefer Equal to Compare for equality comparisons.
if bytes.Equal(a, b) {
// a equal b
}
if !bytes.Equal(a, b) {
// a not equal b
}
}
package main
import (
"bytes"
"sort"
)
func main() {
// Binary search to find a matching byte slice.
var needle []byte
var haystack [][]byte // Assume sorted
i := sort.Search(len(haystack), func(i int) bool {
// Return haystack[i] >= needle.
return bytes.Compare(haystack[i], needle) >= 0
})
if i < len(haystack) && bytes.Equal(haystack[i], needle) {
// Found it!
}
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.Contains([]byte("seafood"), []byte("foo")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("bar")))
fmt.Println(bytes.Contains([]byte("seafood"), []byte("")))
fmt.Println(bytes.Contains([]byte(""), []byte("")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "fÄo!"))
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "去是伟大的."))
fmt.Println(bytes.ContainsAny([]byte("I like seafood."), ""))
fmt.Println(bytes.ContainsAny([]byte(""), ""))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'f'))
fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'ö'))
fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '大'))
fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '!'))
fmt.Println(bytes.ContainsRune([]byte(""), '@'))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.Count([]byte("cheese"), []byte("e")))
fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.Equal([]byte("Go"), []byte("Go")))
fmt.Println(bytes.Equal([]byte("Go"), []byte("C++")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("Fields are: %q", bytes.Fields([]byte(" foo bar baz ")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
f := func(c rune) bool {
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
}
fmt.Printf("Fields are: %q", bytes.FieldsFunc([]byte(" foo1;bar2,baz3..."), f))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("Go")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("C")))
fmt.Println(bytes.HasPrefix([]byte("Gopher"), []byte("")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("go")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("O")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("Ami")))
fmt.Println(bytes.HasSuffix([]byte("Amigo"), []byte("")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.Index([]byte("chicken"), []byte("ken")))
fmt.Println(bytes.Index([]byte("chicken"), []byte("dmr")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.IndexAny([]byte("chicken"), "aeiouy"))
fmt.Println(bytes.IndexAny([]byte("crwth"), "aeiouy"))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.IndexByte([]byte("chicken"), byte('k')))
fmt.Println(bytes.IndexByte([]byte("chicken"), byte('g')))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
f := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(bytes.IndexFunc([]byte("Hello, 世界"), f))
fmt.Println(bytes.IndexFunc([]byte("Hello, world"), f))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.IndexRune([]byte("chicken"), 'k'))
fmt.Println(bytes.IndexRune([]byte("chicken"), 'd'))
}
package main
import (
"bytes"
"fmt"
)
func main() {
s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")}
fmt.Printf("%s", bytes.Join(s, []byte(", ")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.Index([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("go")))
fmt.Println(bytes.LastIndex([]byte("go gopher"), []byte("rodent")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "MüQp"))
fmt.Println(bytes.LastIndexAny([]byte("go 地鼠"), "地大"))
fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "z,!."))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('g')))
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('r')))
fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('z')))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsLetter))
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsPunct))
fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsNumber))
}
package main
import (
"bytes"
"fmt"
)
func main() {
rot13 := func(r rune) rune {
switch {
case r >= 'A' && r <= 'Z':
return 'A' + (r-'A'+13)%26
case r >= 'a' && r <= 'z':
return 'a' + (r-'a'+13)%26
}
return r
}
fmt.Printf("%s", bytes.Map(rot13, []byte("'Twas brillig and the slithy gopher...")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Println(bytes.NewReader([]byte("Hi!")).Len())
fmt.Println(bytes.NewReader([]byte("こんにちは!")).Len())
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("ba%s", bytes.Repeat([]byte("na"), 2))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("k"), []byte("ky"), 2))
fmt.Printf("%s\n", bytes.Replace([]byte("oink oink oink"), []byte("oink"), []byte("moo"), -1))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s\n", bytes.ReplaceAll([]byte("oink oink oink"), []byte("oink"), []byte("moo")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
rs := bytes.Runes([]byte("go gopher"))
for _, r := range rs {
fmt.Printf("%#U\n", r)
}
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(",")))
fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a ")))
fmt.Printf("%q\n", bytes.Split([]byte(" xyz "), []byte("")))
fmt.Printf("%q\n", bytes.Split([]byte(""), []byte("Bernardo O'Higgins")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%q\n", bytes.SplitAfter([]byte("a,b,c"), []byte(",")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%q\n", bytes.SplitAfterN([]byte("a,b,c"), []byte(","), 2))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%q\n", bytes.SplitN([]byte("a,b,c"), []byte(","), 2))
z := bytes.SplitN([]byte("a,b,c"), []byte(","), 0)
fmt.Printf("%q (nil = %v)\n", z, z == nil)
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s", bytes.Title([]byte("her royal highness")))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s", bytes.ToLower([]byte("Gopher")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
str := []byte("AHOJ VÝVOJÁRİ GOLANG")
tolower := bytes.ToLowerSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToLower : " + string(tolower))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s\n", bytes.ToTitle([]byte("loud noises")))
fmt.Printf("%s\n", bytes.ToTitle([]byte("хлеб")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
str := []byte("ahoj vývojári golang")
totitle := bytes.ToTitleSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToTitle : " + string(totitle))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s", bytes.ToUpper([]byte("Gopher")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
str := []byte("ahoj vývojári golang")
toupper := bytes.ToUpperSpecial(unicode.AzeriCase, str)
fmt.Println("Original : " + string(str))
fmt.Println("ToUpper : " + string(toupper))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("[%q]", bytes.Trim([]byte(" !!! Achtung! Achtung! !!! "), "! "))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimFunc([]byte("\"go-gopher!\""), unicode.IsLetter)))
fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Print(string(bytes.TrimLeft([]byte("453gopher8257"), "0123456789")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimLeftFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
}
package main
import (
"bytes"
"fmt"
)
func main() {
var b = []byte("Goodbye,, world!")
b = bytes.TrimPrefix(b, []byte("Goodbye,"))
b = bytes.TrimPrefix(b, []byte("See ya,"))
fmt.Printf("Hello%s", b)
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Print(string(bytes.TrimRight([]byte("453gopher8257"), "0123456789")))
}
package main
import (
"bytes"
"fmt"
"unicode"
)
func main() {
fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher"), unicode.IsLetter)))
fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher!"), unicode.IsPunct)))
fmt.Println(string(bytes.TrimRightFunc([]byte("1234go-gopher!567"), unicode.IsNumber)))
}
package main
import (
"bytes"
"fmt"
)
func main() {
fmt.Printf("%s", bytes.TrimSpace([]byte(" \t\n a lone gopher \n\t\r\n")))
}
package main
import (
"bytes"
"os"
)
func main() {
var b = []byte("Hello, goodbye, etc!")
b = bytes.TrimSuffix(b, []byte("goodbye, etc!"))
b = bytes.TrimSuffix(b, []byte("gopher"))
b = append(b, bytes.TrimSuffix([]byte("world!"), []byte("x!"))...)
os.Stdout.Write(b)
}
Package-Level Type Names (total 4, in which 2 are exported)
A Buffer is a variable-sized buffer of bytes with Read and Write methods.
The zero value for Buffer is an empty buffer ready to use.
buf []byte      // contents are the bytes buf[off : len(buf)]
lastRead readOp // last read operation, so that Unread* can work correctly.
off int         // read at &buf[off], write at &buf[len(buf)]
Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
The slice is valid for use only until the next buffer modification (that is,
only until the next call to a method like Read, Write, Reset, or Truncate).
The slice aliases the buffer content at least until the next buffer modification,
so immediate changes to the slice will affect the result of future reads.
Cap returns the capacity of the buffer's underlying byte slice, that is, the
total space allocated for the buffer's data.
Grow grows the buffer's capacity, if necessary, to guarantee space for
another n bytes. After Grow(n), at least n bytes can be written to the
buffer without another allocation.
If n is negative, Grow will panic.
If the buffer can't grow it will panic with ErrTooLarge.
Len returns the number of bytes of the unread portion of the buffer;
b.Len() == len(b.Bytes()).
Next returns a slice containing the next n bytes from the buffer,
advancing the buffer as if the bytes had been returned by Read.
If there are fewer than n bytes in the buffer, Next returns the entire buffer.
The slice is only valid until the next call to a read or write method.
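An illustrative sketch (not one of the package's own examples) of consuming a buffer in fixed-size chunks with Next; expected output is noted in comments:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	var b bytes.Buffer
	b.Write([]byte("abcde"))
	fmt.Printf("%s\n", b.Next(2)) // ab
	fmt.Printf("%s\n", b.Next(2)) // cd
	fmt.Printf("%s\n", b.Next(2)) // e (fewer than 2 bytes remain)
}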
Read reads the next len(p) bytes from the buffer or until the buffer
is drained. The return value n is the number of bytes read. If the
buffer has no data to return, err is io.EOF (unless len(p) is zero);
otherwise it is nil.
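A minimal sketch of draining a buffer with repeated Read calls; the final call reports io.EOF:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	var b bytes.Buffer
	b.Write([]byte("abcde"))
	p := make([]byte, 3)
	n, err := b.Read(p)
	fmt.Println(n, err, string(p[:n])) // 3 <nil> abc
	n, err = b.Read(p)
	fmt.Println(n, err, string(p[:n])) // 2 <nil> de
	n, err = b.Read(p)
	fmt.Println(n, err) // 0 EOF
}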
ReadByte reads and returns the next byte from the buffer.
If no byte is available, it returns error io.EOF.
ReadBytes reads until the first occurrence of delim in the input,
returning a slice containing the data up to and including the delimiter.
If ReadBytes encounters an error before finding a delimiter,
it returns the data read before the error and the error itself (often io.EOF).
ReadBytes returns err != nil if and only if the returned data does not end in
delim.
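An illustrative sketch of splitting buffered input on a delimiter with ReadBytes; the last field is returned together with io.EOF because it does not end in the delimiter:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	b := bytes.NewBufferString("alpha,beta,gamma")
	for {
		field, err := b.ReadBytes(',')
		fmt.Printf("%q %v\n", field, err) // "alpha," <nil>; "beta," <nil>; "gamma" EOF
		if err != nil {
			break
		}
	}
}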
ReadFrom reads data from r until EOF and appends it to the buffer, growing
the buffer as needed. The return value n is the number of bytes read. Any
error except io.EOF encountered during the read is also returned. If the
buffer becomes too large, ReadFrom will panic with ErrTooLarge.
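A minimal sketch of filling a Buffer from any io.Reader with ReadFrom; a strings.Reader stands in for the source here:
package main
import (
	"bytes"
	"fmt"
	"strings"
)
func main() {
	var b bytes.Buffer
	n, err := b.ReadFrom(strings.NewReader("data from an io.Reader"))
	fmt.Println(n, err)     // 22 <nil>
	fmt.Println(b.String()) // data from an io.Reader
}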
ReadRune reads and returns the next UTF-8-encoded
Unicode code point from the buffer.
If no bytes are available, the error returned is io.EOF.
If the bytes are an erroneous UTF-8 encoding, it
consumes one byte and returns U+FFFD, 1.
ReadString reads until the first occurrence of delim in the input,
returning a string containing the data up to and including the delimiter.
If ReadString encounters an error before finding a delimiter,
it returns the data read before the error and the error itself (often io.EOF).
ReadString returns err != nil if and only if the returned data does not end
in delim.
Reset resets the buffer to be empty,
but it retains the underlying storage for use by future writes.
Reset is the same as Truncate(0).
String returns the contents of the unread portion of the buffer
as a string. If the Buffer is a nil pointer, it returns "<nil>".
To build strings more efficiently, see the strings.Builder type.
Truncate discards all but the first n unread bytes from the buffer
but continues to use the same allocated storage.
It panics if n is negative or greater than the length of the buffer.
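An illustrative sketch contrasting Truncate and Reset:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	var b bytes.Buffer
	b.WriteString("Hello, world")
	b.Truncate(5)           // keep only the first 5 unread bytes
	fmt.Println(b.String()) // Hello
	b.Reset()               // empty the buffer but keep its storage
	fmt.Println(b.Len())    // 0
}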
UnreadByte unreads the last byte returned by the most recent successful
read operation that read at least one byte. If a write has happened since
the last read, if the last read returned an error, or if the read read zero
bytes, UnreadByte returns an error.
UnreadRune unreads the last rune returned by ReadRune.
If the most recent read or write operation on the buffer was
not a successful ReadRune, UnreadRune returns an error. (In this regard
it is stricter than UnreadByte, which will unread the last byte
from any read operation.)
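A minimal sketch of ReadRune followed by UnreadRune; unreading is valid only immediately after a successful ReadRune:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	b := bytes.NewBufferString("héllo")
	r, size, _ := b.ReadRune()
	fmt.Printf("%c %d\n", r, size) // h 1
	r, size, _ = b.ReadRune()
	fmt.Printf("%c %d\n", r, size) // é 2
	_ = b.UnreadRune()             // push 'é' back onto the buffer
	fmt.Println(b.String())        // éllo
}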
Write appends the contents of p to the buffer, growing the buffer as
needed. The return value n is the length of p; err is always nil. If the
buffer becomes too large, Write will panic with ErrTooLarge.
WriteByte appends the byte c to the buffer, growing the buffer as needed.
The returned error is always nil, but is included to match bufio.Writer's
WriteByte. If the buffer becomes too large, WriteByte will panic with
ErrTooLarge.
WriteRune appends the UTF-8 encoding of Unicode code point r to the
buffer, returning its length and an error, which is always nil but is
included to match bufio.Writer's WriteRune. The buffer is grown as needed;
if it becomes too large, WriteRune will panic with ErrTooLarge.
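An illustrative sketch of WriteRune appending multi-byte UTF-8 encodings:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	var b bytes.Buffer
	n, _ := b.WriteRune('世') // appends the 3-byte UTF-8 encoding
	fmt.Println(n)           // 3
	b.WriteRune('界')
	fmt.Println(b.String(), b.Len()) // 世界 6
}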
WriteString appends the contents of s to the buffer, growing the buffer as
needed. The return value n is the length of s; err is always nil. If the
buffer becomes too large, WriteString will panic with ErrTooLarge.
WriteTo writes data to w until the buffer is drained or an error occurs.
The return value n is the number of bytes written; it always fits into an
int, but it is int64 to match the io.WriterTo interface. Any error
encountered during the write is also returned.
empty reports whether the unread portion of the buffer is empty.
grow grows the buffer to guarantee space for n more bytes.
It returns the index where bytes should be written.
If the buffer can't grow it will panic with ErrTooLarge.
readSlice is like ReadBytes but returns a reference to internal buffer data.
tryGrowByReslice is an inlineable version of grow for the fast case where the
internal buffer only needs to be resliced.
It returns the index where bytes should be written and whether it succeeded.
*T : fmt.Stringer
*T : io.ByteReader
*T : io.ByteScanner
*T : io.ByteWriter
*T : io.Reader
*T : io.ReaderFrom
*T : io.ReadWriter
*T : io.RuneReader
*T : io.RuneScanner
*T : io.StringWriter
*T : io.Writer
*T : io.WriterTo
*T : context.stringer
*T : runtime.stringer
func NewBuffer(buf []byte) *Buffer
func NewBufferString(s string) *Buffer
func encoding/json.Compact(dst *Buffer, src []byte) error
func encoding/json.HTMLEscape(dst *Buffer, src []byte)
func encoding/json.Indent(dst *Buffer, src []byte, prefix, indent string) error
func encoding/json.compact(dst *Buffer, src []byte, escape bool) error
func encoding/json.newline(dst *Buffer, prefix, indent string, depth int)
A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker,
io.ByteScanner, and io.RuneScanner interfaces by reading from
a byte slice.
Unlike a Buffer, a Reader is read-only and supports seeking.
The zero value for Reader operates like a Reader of an empty slice.
i int64      // current reading index
prevRune int // index of previous rune; or < 0
s []byte
Len returns the number of bytes of the unread portion of the
slice.
Read implements the io.Reader interface.
ReadAt implements the io.ReaderAt interface.
ReadByte implements the io.ByteReader interface.
ReadRune implements the io.RuneReader interface.
Reset resets the Reader to be reading from b.
Seek implements the io.Seeker interface.
Size returns the original length of the underlying byte slice.
Size is the number of bytes available for reading via ReadAt.
The returned value is always the same and is not affected by calls
to any other method.
UnreadByte complements ReadByte in implementing the io.ByteScanner interface.
UnreadRune complements ReadRune in implementing the io.RuneScanner interface.
WriteTo implements the io.WriterTo interface.
*T : io.ByteReader
*T : io.ByteScanner
*T : io.Reader
*T : io.ReaderAt
*T : io.ReadSeeker
*T : io.RuneReader
*T : io.RuneScanner
*T : io.Seeker
*T : io.WriterTo
func NewReader(b []byte) *Reader
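An illustrative sketch (not one of the package's own examples) of the seeking and random-access behavior that distinguishes Reader from Buffer:
package main
import (
	"bytes"
	"fmt"
	"io"
)
func main() {
	r := bytes.NewReader([]byte("0123456789"))
	p := make([]byte, 4)
	n, _ := r.ReadAt(p, 6)     // ReadAt does not move the reading index
	fmt.Println(string(p[:n])) // 6789
	r.Seek(2, io.SeekStart)    // reposition for subsequent Reads
	n, _ = r.Read(p)
	fmt.Println(string(p[:n])) // 2345
}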
asciiSet is a 32-byte value, where each bit represents the presence of a
given ASCII character in the set. The 128 bits of the lower 16 bytes,
starting with the least-significant bit of the lowest word to the
most-significant bit of the highest word, map to the full range of all
128 ASCII characters. The 128 bits of the upper 16 bytes will be zeroed,
ensuring that any non-ASCII character will be reported as not in the set.
contains reports whether c is inside the set.
func makeASCIISet(chars string) (as asciiSet, ok bool)
The readOp constants describe the last action performed on
the buffer, so that UnreadRune and UnreadByte can check for
invalid usage. opReadRuneX constants are chosen such that
converted to int they correspond to the rune size that was read.
const opInvalid
const opRead
const opReadRune1
const opReadRune2
const opReadRune3
const opReadRune4
Package-Level Functions (total 59, in which 50 are exported)
Compare returns an integer comparing two byte slices lexicographically.
The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
A nil argument is equivalent to an empty slice.
Contains reports whether subslice is within b.
ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b.
ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b.
Count counts the number of non-overlapping instances of sep in s.
If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s.
Equal reports whether a and b
are the same length and contain the same bytes.
A nil argument is equivalent to an empty slice.
EqualFold reports whether s and t, interpreted as UTF-8 strings,
are equal under Unicode case-folding, which is a more general
form of case-insensitivity.
Fields interprets s as a sequence of UTF-8-encoded code points.
It splits the slice s around each instance of one or more consecutive white space
characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an
empty slice if s contains only white space.
FieldsFunc interprets s as a sequence of UTF-8-encoded code points.
It splits the slice s at each run of code points c satisfying f(c) and
returns a slice of subslices of s. If all code points in s satisfy f(c), or
len(s) == 0, an empty slice is returned.
FieldsFunc makes no guarantees about the order in which it calls f(c)
and assumes that f always returns the same value for a given c.
HasPrefix tests whether the byte slice s begins with prefix.
HasSuffix tests whether the byte slice s ends with suffix.
Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
IndexAny interprets s as a sequence of UTF-8-encoded Unicode code points.
It returns the byte index of the first occurrence in s of any of the Unicode
code points in chars. It returns -1 if chars is empty or if there is no code
point in common.
IndexByte returns the index of the first instance of c in b, or -1 if c is not present in b.
IndexFunc interprets s as a sequence of UTF-8-encoded code points.
It returns the byte index in s of the first Unicode
code point satisfying f(c), or -1 if none do.
IndexRune interprets s as a sequence of UTF-8-encoded code points.
It returns the byte index of the first occurrence in s of the given rune.
It returns -1 if rune is not present in s.
If r is utf8.RuneError, it returns the first instance of any
invalid UTF-8 byte sequence.
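A minimal sketch (not one of the package's own examples) of the utf8.RuneError case:
package main
import (
	"bytes"
	"fmt"
	"unicode/utf8"
)
func main() {
	// With utf8.RuneError, the index of the first invalid UTF-8 sequence is returned.
	fmt.Println(bytes.IndexRune([]byte("ab\xffcd"), utf8.RuneError))    // 2
	fmt.Println(bytes.IndexRune([]byte("valid utf-8"), utf8.RuneError)) // -1
}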
Join concatenates the elements of s to create a new byte slice. The separator
sep is placed between elements in the resulting slice.
LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
LastIndexAny interprets s as a sequence of UTF-8-encoded Unicode code
points. It returns the byte index of the last occurrence in s of any of
the Unicode code points in chars. It returns -1 if chars is empty or if
there is no code point in common.
LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
LastIndexFunc interprets s as a sequence of UTF-8-encoded code points.
It returns the byte index in s of the last Unicode
code point satisfying f(c), or -1 if none do.
Map returns a copy of the byte slice s with all its characters modified
according to the mapping function. If mapping returns a negative value, the character is
dropped from the byte slice with no replacement. The characters in s and the
output are interpreted as UTF-8-encoded code points.
NewBuffer creates and initializes a new Buffer using buf as its
initial contents. The new Buffer takes ownership of buf, and the
caller should not use buf after this call. NewBuffer is intended to
prepare a Buffer to read existing data. It can also be used to set
the initial size of the internal buffer for writing. To do that,
buf should have the desired capacity but a length of zero.
In most cases, new(Buffer) (or just declaring a Buffer variable) is
sufficient to initialize a Buffer.
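A minimal sketch of both uses of NewBuffer: wrapping existing data for reading, and pre-sizing the internal buffer for writing with a zero-length, non-zero-capacity slice:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	r := bytes.NewBuffer([]byte("existing data")) // ready to read
	fmt.Println(r.String())                       // existing data
	w := bytes.NewBuffer(make([]byte, 0, 64))     // capacity 64, length 0
	w.WriteString("fits without reallocating")
	fmt.Println(w.Cap() >= 64, w.Len()) // true 25
}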
NewBufferString creates and initializes a new Buffer using string s as its
initial contents. It is intended to prepare a buffer to read an existing
string.
In most cases, new(Buffer) (or just declaring a Buffer variable) is
sufficient to initialize a Buffer.
NewReader returns a new Reader reading from b.
Repeat returns a new byte slice consisting of count copies of b.
It panics if count is negative or if
the result of (len(b) * count) overflows.
Replace returns a copy of the slice s with the first n
non-overlapping instances of old replaced by new.
If old is empty, it matches at the beginning of the slice
and after each UTF-8 sequence, yielding up to k+1 replacements
for a k-rune slice.
If n < 0, there is no limit on the number of replacements.
ReplaceAll returns a copy of the slice s with all
non-overlapping instances of old replaced by new.
If old is empty, it matches at the beginning of the slice
and after each UTF-8 sequence, yielding up to k+1 replacements
for a k-rune slice.
Runes interprets s as a sequence of UTF-8-encoded code points.
It returns a slice of runes (Unicode code points) equivalent to s.
Split slices s into all subslices separated by sep and returns a slice of
the subslices between those separators.
If sep is empty, Split splits after each UTF-8 sequence.
It is equivalent to SplitN with a count of -1.
SplitAfter slices s into all subslices after each instance of sep and
returns a slice of those subslices.
If sep is empty, SplitAfter splits after each UTF-8 sequence.
It is equivalent to SplitAfterN with a count of -1.
SplitAfterN slices s into subslices after each instance of sep and
returns a slice of those subslices.
If sep is empty, SplitAfterN splits after each UTF-8 sequence.
The count determines the number of subslices to return:
n > 0: at most n subslices; the last subslice will be the unsplit remainder.
n == 0: the result is nil (zero subslices)
n < 0: all subslices
SplitN slices s into subslices separated by sep and returns a slice of
the subslices between those separators.
If sep is empty, SplitN splits after each UTF-8 sequence.
The count determines the number of subslices to return:
n > 0: at most n subslices; the last subslice will be the unsplit remainder.
n == 0: the result is nil (zero subslices)
n < 0: all subslices
Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin
words mapped to their title case.
BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
ToLower returns a copy of the byte slice s with all Unicode letters mapped to
their lower case.
ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
lower case, giving priority to the special casing rules.
ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case.
ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
title case, giving priority to the special casing rules.
ToUpper returns a copy of the byte slice s with all Unicode letters mapped to
their upper case.
ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their
upper case, giving priority to the special casing rules.
ToValidUTF8 treats s as UTF-8-encoded bytes and returns a copy with each run of bytes
representing invalid UTF-8 replaced with the bytes in replacement, which may be empty.
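An illustrative sketch of ToValidUTF8; the replacement may itself be empty:
package main
import (
	"bytes"
	"fmt"
)
func main() {
	fmt.Printf("%q\n", bytes.ToValidUTF8([]byte("ab\xffc"), []byte("\uFFFD"))) // the invalid byte is replaced with U+FFFD
	fmt.Printf("%q\n", bytes.ToValidUTF8([]byte("ab\xff\xffc"), []byte("")))   // "abc" (the run of invalid bytes is dropped)
}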
Trim returns a subslice of s by slicing off all leading and
trailing UTF-8-encoded code points contained in cutset.
TrimFunc returns a subslice of s by slicing off all leading and trailing
UTF-8-encoded code points c that satisfy f(c).
TrimLeft returns a subslice of s by slicing off all leading
UTF-8-encoded code points contained in cutset.
TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off
all leading UTF-8-encoded code points c that satisfy f(c).
TrimPrefix returns s without the provided leading prefix string.
If s doesn't start with prefix, s is returned unchanged.
TrimRight returns a subslice of s by slicing off all trailing
UTF-8-encoded code points that are contained in cutset.
TrimRightFunc returns a subslice of s by slicing off all trailing
UTF-8-encoded code points c that satisfy f(c).
TrimSpace returns a subslice of s by slicing off all leading and
trailing white space, as defined by Unicode.
TrimSuffix returns s without the provided trailing suffix string.
If s doesn't end with suffix, s is returned unchanged.
explode splits s into a slice of UTF-8 sequences, one per Unicode code point (still slices of bytes),
up to a maximum of n byte slices. Invalid UTF-8 sequences are chopped into individual bytes.
Generic split: splits after each instance of sep,
including sepSave bytes of sep in the subslices.
Package-Level Constants (total 9, in which 1 are exported)
MinRead is the minimum slice size passed to a Read call by
Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
what is required to hold the contents of r, ReadFrom will not grow the
underlying buffer.
Don't use iota for these, as the values need to correspond with the
names and comments, which is easier to see when being explicit.
smallBufferSize is the minimal capacity of the buffer's initial allocation.