Update dependencies, enable pruning for vendor/

So, `dep` got a nice new feature to remove tests and non-Go files from
`vendor/`, which brings the size of the vendor directory down from ~300MiB
to ~20MiB. We do that now.
Alexander Neumann 2018-08-01 19:43:44 +02:00
parent 3422c1ca83
commit bff635bc5f
6741 changed files with 26942 additions and 4902033 deletions
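
For context on the commit message above: `dep` pruning is configured per project in Gopkg.toml. A minimal sketch of the kind of stanza this commit enables (illustrative only; the exact options chosen in this commit are not shown in this excerpt):

[prune]
  go-tests = true
  non-go = true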


@@ -1,499 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package unicode
import (
"testing"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/internal/enctest"
"golang.org/x/text/transform"
)
func TestBasics(t *testing.T) {
testCases := []struct {
e encoding.Encoding
encPrefix string
encSuffix string
encoded string
utf8 string
}{{
e: utf16BEIB,
encoded: "\x00\x57\x00\xe4\xd8\x35\xdd\x65",
utf8: "\x57\u00e4\U0001d565",
}, {
e: utf16BEEB,
encPrefix: "\xfe\xff",
encoded: "\x00\x57\x00\xe4\xd8\x35\xdd\x65",
utf8: "\x57\u00e4\U0001d565",
}, {
e: utf16LEIB,
encoded: "\x57\x00\xe4\x00\x35\xd8\x65\xdd",
utf8: "\x57\u00e4\U0001d565",
}, {
e: utf16LEEB,
encPrefix: "\xff\xfe",
encoded: "\x57\x00\xe4\x00\x35\xd8\x65\xdd",
utf8: "\x57\u00e4\U0001d565",
}}
for _, tc := range testCases {
enctest.TestEncoding(t, tc.e, tc.encoded, tc.utf8, tc.encPrefix, tc.encSuffix)
}
}
func TestFiles(t *testing.T) {
enctest.TestFile(t, UTF8)
enctest.TestFile(t, utf16LEIB)
}
func BenchmarkEncoding(b *testing.B) {
enctest.Benchmark(b, UTF8)
enctest.Benchmark(b, utf16LEIB)
}
var (
utf16LEIB = UTF16(LittleEndian, IgnoreBOM) // UTF-16LE (atypical interpretation)
utf16LEUB = UTF16(LittleEndian, UseBOM) // UTF-16, LE
utf16LEEB = UTF16(LittleEndian, ExpectBOM) // UTF-16, LE, Expect
utf16BEIB = UTF16(BigEndian, IgnoreBOM) // UTF-16BE (atypical interpretation)
utf16BEUB = UTF16(BigEndian, UseBOM) // UTF-16 default
utf16BEEB = UTF16(BigEndian, ExpectBOM) // UTF-16 Expect
)
func TestUTF16(t *testing.T) {
testCases := []struct {
desc string
src string
notEOF bool // the inverse of atEOF
sizeDst int
want string
nSrc int
err error
t transform.Transformer
}{{
desc: "utf-16 IgnoreBOM dec: empty string",
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16 UseBOM dec: empty string",
t: utf16BEUB.NewDecoder(),
}, {
desc: "utf-16 ExpectBOM dec: empty string",
err: ErrMissingBOM,
t: utf16BEEB.NewDecoder(),
}, {
desc: "utf-16 dec: BOM determines encoding BE (RFC 2781:3.3)",
src: "\xFE\xFF\xD8\x08\xDF\x45\x00\x3D\x00\x52\x00\x61",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 12,
t: utf16BEUB.NewDecoder(),
}, {
desc: "utf-16 dec: BOM determines encoding LE (RFC 2781:3.3)",
src: "\xFF\xFE\x08\xD8\x45\xDF\x3D\x00\x52\x00\x61\x00",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 12,
t: utf16LEUB.NewDecoder(),
}, {
desc: "utf-16 dec: BOM determines encoding LE, change default (RFC 2781:3.3)",
src: "\xFF\xFE\x08\xD8\x45\xDF\x3D\x00\x52\x00\x61\x00",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 12,
t: utf16BEUB.NewDecoder(),
}, {
desc: "utf-16 dec: Fail on missing BOM when required",
src: "\x08\xD8\x45\xDF\x3D\x00\xFF\xFE\xFE\xFF\x00\x52\x00\x61",
sizeDst: 100,
want: "",
nSrc: 0,
err: ErrMissingBOM,
t: utf16BEEB.NewDecoder(),
}, {
desc: "utf-16 dec: SHOULD interpret text as big-endian when BOM not present (RFC 2781:4.3)",
src: "\xD8\x08\xDF\x45\x00\x3D\x00\x52\x00\x61",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 10,
t: utf16BEUB.NewDecoder(),
}, {
// This is an error according to RFC 2781. But errors in RFC 2781 are
// open to interpretations, so I guess this is fine.
desc: "utf-16le dec: incorrect BOM is an error (RFC 2781:4.1)",
src: "\xFE\xFF\x08\xD8\x45\xDF\x3D\x00\x52\x00\x61\x00",
sizeDst: 100,
want: "\uFFFE\U00012345=Ra",
nSrc: 12,
t: utf16LEIB.NewDecoder(),
}, {
desc: "utf-16 enc: SHOULD write BOM (RFC 2781:3.3)",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\xFF\xFE\x08\xD8\x45\xDF\x3D\x00\x52\x00\x61\x00",
nSrc: 7,
t: utf16LEUB.NewEncoder(),
}, {
desc: "utf-16 enc: SHOULD write BOM (RFC 2781:3.3)",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\xFE\xFF\xD8\x08\xDF\x45\x00\x3D\x00\x52\x00\x61",
nSrc: 7,
t: utf16BEUB.NewEncoder(),
}, {
desc: "utf-16le enc: MUST NOT write BOM (RFC 2781:3.3)",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\x08\xD8\x45\xDF\x3D\x00\x52\x00\x61\x00",
nSrc: 7,
t: utf16LEIB.NewEncoder(),
}, {
desc: "utf-16be dec: incorrect UTF-16: odd bytes",
src: "\x00",
sizeDst: 100,
want: "\uFFFD",
nSrc: 1,
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16be dec: unpaired surrogate, odd bytes",
src: "\xD8\x45\x00",
sizeDst: 100,
want: "\uFFFD\uFFFD",
nSrc: 3,
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16be dec: unpaired low surrogate + valid text",
src: "\xD8\x45\x00a",
sizeDst: 100,
want: "\uFFFDa",
nSrc: 4,
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16be dec: unpaired low surrogate + valid text + single byte",
src: "\xD8\x45\x00ab",
sizeDst: 100,
want: "\uFFFDa\uFFFD",
nSrc: 5,
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16le dec: unpaired high surrogate",
src: "\x00\x00\x00\xDC\x12\xD8",
sizeDst: 100,
want: "\x00\uFFFD\uFFFD",
nSrc: 6,
t: utf16LEIB.NewDecoder(),
}, {
desc: "utf-16be dec: two unpaired low surrogates",
src: "\xD8\x45\xD8\x12",
sizeDst: 100,
want: "\uFFFD\uFFFD",
nSrc: 4,
t: utf16BEIB.NewDecoder(),
}, {
desc: "utf-16be dec: short dst",
src: "\x00a",
sizeDst: 0,
want: "",
nSrc: 0,
t: utf16BEIB.NewDecoder(),
err: transform.ErrShortDst,
}, {
desc: "utf-16be dec: short dst surrogate",
src: "\xD8\xF5\xDC\x12",
sizeDst: 3,
want: "",
nSrc: 0,
t: utf16BEIB.NewDecoder(),
err: transform.ErrShortDst,
}, {
desc: "utf-16be dec: short dst trailing byte",
src: "\x00",
sizeDst: 2,
want: "",
nSrc: 0,
t: utf16BEIB.NewDecoder(),
err: transform.ErrShortDst,
}, {
desc: "utf-16be dec: short src",
src: "\x00",
notEOF: true,
sizeDst: 3,
want: "",
nSrc: 0,
t: utf16BEIB.NewDecoder(),
err: transform.ErrShortSrc,
}, {
desc: "utf-16 enc",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\xFE\xFF\xD8\x08\xDF\x45\x00\x3D\x00\x52\x00\x61",
nSrc: 7,
t: utf16BEUB.NewEncoder(),
}, {
desc: "utf-16 enc: short dst normal",
src: "\U00012345=Ra",
sizeDst: 9,
want: "\xD8\x08\xDF\x45\x00\x3D\x00\x52",
nSrc: 6,
t: utf16BEIB.NewEncoder(),
err: transform.ErrShortDst,
}, {
desc: "utf-16 enc: short dst surrogate",
src: "\U00012345=Ra",
sizeDst: 3,
want: "",
nSrc: 0,
t: utf16BEIB.NewEncoder(),
err: transform.ErrShortDst,
}, {
desc: "utf-16 enc: short src",
src: "\U00012345=Ra\xC2",
notEOF: true,
sizeDst: 100,
want: "\xD8\x08\xDF\x45\x00\x3D\x00\x52\x00\x61",
nSrc: 7,
t: utf16BEIB.NewEncoder(),
err: transform.ErrShortSrc,
}, {
desc: "utf-16be dec: don't change byte order mid-stream",
src: "\xFE\xFF\xD8\x08\xDF\x45\x00\x3D\xFF\xFE\x00\x52\x00\x61",
sizeDst: 100,
want: "\U00012345=\ufffeRa",
nSrc: 14,
t: utf16BEUB.NewDecoder(),
}, {
desc: "utf-16le dec: don't change byte order mid-stream",
src: "\xFF\xFE\x08\xD8\x45\xDF\x3D\x00\xFF\xFE\xFE\xFF\x52\x00\x61\x00",
sizeDst: 100,
want: "\U00012345=\ufeff\ufffeRa",
nSrc: 16,
t: utf16LEUB.NewDecoder(),
}}
for i, tc := range testCases {
b := make([]byte, tc.sizeDst)
nDst, nSrc, err := tc.t.Transform(b, []byte(tc.src), !tc.notEOF)
if err != tc.err {
t.Errorf("%d:%s: error was %v; want %v", i, tc.desc, err, tc.err)
}
if got := string(b[:nDst]); got != tc.want {
t.Errorf("%d:%s: result was %q: want %q", i, tc.desc, got, tc.want)
}
if nSrc != tc.nSrc {
t.Errorf("%d:%s: nSrc was %d; want %d", i, tc.desc, nSrc, tc.nSrc)
}
}
}
func TestUTF8Decoder(t *testing.T) {
testCases := []struct {
desc string
src string
notEOF bool // the inverse of atEOF
sizeDst int
want string
nSrc int
err error
}{{
desc: "empty string, empty dest buffer",
}, {
desc: "empty string",
sizeDst: 8,
}, {
desc: "empty string, streaming",
notEOF: true,
sizeDst: 8,
}, {
desc: "ascii",
src: "abcde",
sizeDst: 8,
want: "abcde",
nSrc: 5,
}, {
desc: "ascii and error",
src: "ab\x80de",
sizeDst: 7,
want: "ab\ufffdde",
nSrc: 5,
}, {
desc: "valid two-byte sequence",
src: "a\u0300bc",
sizeDst: 7,
want: "a\u0300bc",
nSrc: 5,
}, {
desc: "valid three-byte sequence",
src: "a\u0300中",
sizeDst: 7,
want: "a\u0300中",
nSrc: 6,
}, {
desc: "valid four-byte sequence",
src: "a中\U00016F50",
sizeDst: 8,
want: "a中\U00016F50",
nSrc: 8,
}, {
desc: "short source buffer",
src: "abc\xf0\x90",
notEOF: true,
sizeDst: 10,
want: "abc",
nSrc: 3,
err: transform.ErrShortSrc,
}, {
// We don't check for the maximal subpart of an ill-formed subsequence
// at the end of an open segment.
desc: "complete invalid that looks like short at end",
src: "abc\xf0\x80",
notEOF: true,
sizeDst: 10,
want: "abc", // instead of "abc\ufffd\ufffd",
nSrc: 3,
err: transform.ErrShortSrc,
}, {
desc: "incomplete sequence at end",
src: "a\x80bc\xf0\x90",
sizeDst: 9,
want: "a\ufffdbc\ufffd",
nSrc: 6,
}, {
desc: "invalid second byte",
src: "abc\xf0dddd",
sizeDst: 10,
want: "abc\ufffddddd",
nSrc: 8,
}, {
desc: "invalid second byte at end",
src: "abc\xf0d",
sizeDst: 10,
want: "abc\ufffdd",
nSrc: 5,
}, {
desc: "invalid third byte",
src: "a\u0300bc\xf0\x90dddd",
sizeDst: 12,
want: "a\u0300bc\ufffddddd",
nSrc: 11,
}, {
desc: "invalid third byte at end",
src: "a\u0300bc\xf0\x90d",
sizeDst: 12,
want: "a\u0300bc\ufffdd",
nSrc: 8,
}, {
desc: "invalid fourth byte, tight buffer",
src: "a\u0300bc\xf0\x90\x80d",
sizeDst: 9,
want: "a\u0300bc\ufffdd",
nSrc: 9,
}, {
desc: "invalid fourth byte at end",
src: "a\u0300bc\xf0\x90\x80",
sizeDst: 8,
want: "a\u0300bc\ufffd",
nSrc: 8,
}, {
desc: "invalid fourth byte and short four byte sequence",
src: "a\u0300bc\xf0\x90\x80\xf0\x90\x80",
notEOF: true,
sizeDst: 20,
want: "a\u0300bc\ufffd",
nSrc: 8,
err: transform.ErrShortSrc,
}, {
desc: "valid four-byte sequence overflowing short buffer",
src: "a\u0300bc\xf0\x90\x80\x80",
notEOF: true,
sizeDst: 8,
want: "a\u0300bc",
nSrc: 5,
err: transform.ErrShortDst,
}, {
desc: "invalid fourth byte at end short, but short dst",
src: "a\u0300bc\xf0\x90\x80\xf0\x90\x80",
notEOF: true,
sizeDst: 8,
// More bytes would fit in the buffer, but this seems to require a more
// complicated and slower algorithm.
want: "a\u0300bc", // instead of "a\u0300bc"
nSrc: 5,
err: transform.ErrShortDst,
}, {
desc: "short dst for error",
src: "abc\x80",
notEOF: true,
sizeDst: 5,
want: "abc",
nSrc: 3,
err: transform.ErrShortDst,
}, {
desc: "adjusting short dst buffer",
src: "abc\x80ef",
notEOF: true,
sizeDst: 6,
want: "abc\ufffd",
nSrc: 4,
err: transform.ErrShortDst,
}}
tr := UTF8.NewDecoder()
for i, tc := range testCases {
b := make([]byte, tc.sizeDst)
nDst, nSrc, err := tr.Transform(b, []byte(tc.src), !tc.notEOF)
if err != tc.err {
t.Errorf("%d:%s: error was %v; want %v", i, tc.desc, err, tc.err)
}
if got := string(b[:nDst]); got != tc.want {
t.Errorf("%d:%s: result was %q: want %q", i, tc.desc, got, tc.want)
}
if nSrc != tc.nSrc {
t.Errorf("%d:%s: nSrc was %d; want %d", i, tc.desc, nSrc, tc.nSrc)
}
}
}
func TestBOMOverride(t *testing.T) {
dec := BOMOverride(charmap.CodePage437.NewDecoder())
dst := make([]byte, 100)
for i, tc := range []struct {
src string
atEOF bool
dst string
nSrc int
err error
}{
0: {"H\x82ll\x93", true, "Héllô", 5, nil},
1: {"\uFEFFHéllö", true, "Héllö", 10, nil},
2: {"\xFE\xFF\x00H\x00e\x00l\x00l\x00o", true, "Hello", 12, nil},
3: {"\xFF\xFEH\x00e\x00l\x00l\x00o\x00", true, "Hello", 12, nil},
4: {"\uFEFF", true, "", 3, nil},
5: {"\xFE\xFF", true, "", 2, nil},
6: {"\xFF\xFE", true, "", 2, nil},
7: {"\xEF\xBB", true, "\u2229\u2557", 2, nil},
8: {"\xEF", true, "\u2229", 1, nil},
9: {"", true, "", 0, nil},
10: {"\xFE", true, "\u25a0", 1, nil},
11: {"\xFF", true, "\u00a0", 1, nil},
12: {"\xEF\xBB", false, "", 0, transform.ErrShortSrc},
13: {"\xEF", false, "", 0, transform.ErrShortSrc},
14: {"", false, "", 0, transform.ErrShortSrc},
15: {"\xFE", false, "", 0, transform.ErrShortSrc},
16: {"\xFF", false, "", 0, transform.ErrShortSrc},
17: {"\xFF\xFE", false, "", 0, transform.ErrShortSrc},
} {
dec.Reset()
nDst, nSrc, err := dec.Transform(dst, []byte(tc.src), tc.atEOF)
got := string(dst[:nDst])
if nSrc != tc.nSrc {
t.Errorf("%d: nSrc: got %d; want %d", i, nSrc, tc.nSrc)
}
if got != tc.dst {
t.Errorf("%d: got %+q; want %+q", i, got, tc.dst)
}
if err != tc.err {
t.Errorf("%d: error: got %v; want %v", i, err, tc.err)
}
}
}
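
The table tests above drive Transform directly; in application code these encodings are normally used through the transform helpers (or wrapped with BOMOverride, as TestBOMOverride shows). A minimal, hedged sketch — not taken from this commit — of decoding BOM-prefixed UTF-16 with this package:

package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// "\xFE\xFF" is a big-endian BOM, followed by UTF-16BE "Hi".
	src := "\xFE\xFF\x00H\x00i"

	// UseBOM lets a leading BOM select the byte order; BigEndian is the
	// default when no BOM is present.
	dec := unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewDecoder()

	out, _, err := transform.String(dec, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hi
}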


@@ -1,296 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utf32 provides the UTF-32 Unicode encoding.
//
// Please note that support for UTF-32 is discouraged as it is a rare and
// inefficient encoding, unfit for use as an interchange format. For use
// on the web, the W3C strongly discourages its use
// (https://www.w3.org/TR/html5/document-metadata.html#charset)
// while WHATWG directly prohibits supporting it
// (https://html.spec.whatwg.org/multipage/syntax.html#character-encodings).
package utf32 // import "golang.org/x/text/encoding/unicode/utf32"
import (
"errors"
"unicode/utf8"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/identifier"
"golang.org/x/text/transform"
)
// All lists a configuration for each IANA-defined UTF-32 variant.
var All = []encoding.Encoding{
UTF32(BigEndian, UseBOM),
UTF32(BigEndian, IgnoreBOM),
UTF32(LittleEndian, IgnoreBOM),
}
// ErrMissingBOM means that decoding UTF-32 input with ExpectBOM did not
// find a starting byte order mark.
var ErrMissingBOM = errors.New("encoding: missing byte order mark")
// UTF32 returns a UTF-32 Encoding for the given default endianness and
// byte order mark (BOM) policy.
//
// When decoding from UTF-32 to UTF-8, if the BOMPolicy is IgnoreBOM then
// neither BOMs U+FEFF nor ill-formed code units 0xFFFE0000 in the input
// stream will affect the endianness used for decoding. Instead BOMs will
// be output as their standard UTF-8 encoding "\xef\xbb\xbf" while
// 0xFFFE0000 code units will be output as "\xef\xbf\xbd", the standard
// UTF-8 encoding for the Unicode replacement character. If the BOMPolicy
// is UseBOM or ExpectBOM a starting BOM is not written to the UTF-8
// output. Instead, it overrides the default endianness e for the remainder
// of the transformation. Any subsequent BOMs U+FEFF or ill-formed code
// units 0xFFFE0000 will not affect the endianness used, and will instead
// be output as their standard UTF-8 (replacement) encodings. For UseBOM,
// if there is no starting BOM, it will proceed with the default
// Endianness. For ExpectBOM, in that case, the transformation will return
// early with an ErrMissingBOM error.
//
// When encoding from UTF-8 to UTF-32, a BOM will be inserted at the start
// of the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM
// will not be inserted. The UTF-8 input does not need to contain a BOM.
//
// There is no concept of a 'native' endianness. If the UTF-32 data is
// produced and consumed in a greater context that implies a certain
// endianness, use IgnoreBOM. Otherwise, use ExpectBOM and always produce
// and consume a BOM.
//
// In the language of http://www.unicode.org/faq/utf_bom.html#bom10,
// IgnoreBOM corresponds to "Where the precise type of the data stream is
// known... the BOM should not be used" and ExpectBOM corresponds to "A
// particular protocol... may require use of the BOM".
func UTF32(e Endianness, b BOMPolicy) encoding.Encoding {
return utf32Encoding{config{e, b}, mibValue[e][b&bomMask]}
}
// mibValue maps Endianness and BOMPolicy settings to MIB constants for UTF-32.
// Note that some configurations map to the same MIB identifier.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
BigEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF32BE,
UseBOM: identifier.UTF32,
},
LittleEndian: [numBOMValues]identifier.MIB{
IgnoreBOM: identifier.UTF32LE,
UseBOM: identifier.UTF32,
},
// ExpectBOM is not widely used and has no valid MIB identifier.
}
// BOMPolicy is a UTF-32 encoding's byte order mark policy.
type BOMPolicy uint8
const (
writeBOM BOMPolicy = 0x01
acceptBOM BOMPolicy = 0x02
requireBOM BOMPolicy = 0x04
bomMask BOMPolicy = 0x07
// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
// map of an array of length 8 of a type that is also used as a key or value
// in another map). See golang.org/issue/11354.
// TODO: consider changing this value back to 8 if the use of 1.4.* has
// been minimized.
numBOMValues = 8 + 1
// IgnoreBOM means to ignore any byte order marks.
IgnoreBOM BOMPolicy = 0
// Unicode-compliant interpretation for UTF-32BE/LE.
// UseBOM means that the UTF-32 form may start with a byte order mark,
// which will be used to override the default encoding.
UseBOM BOMPolicy = writeBOM | acceptBOM
// Unicode-compliant interpretation for UTF-32.
// ExpectBOM means that the UTF-32 form must start with a byte order mark,
// which will be used to override the default encoding.
ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
// Consistent with BOMPolicy definition in golang.org/x/text/encoding/unicode
)
// Endianness is a UTF-32 encoding's default endianness.
type Endianness bool
const (
// BigEndian is UTF-32BE.
BigEndian Endianness = false
// LittleEndian is UTF-32LE.
LittleEndian Endianness = true
)
type config struct {
endianness Endianness
bomPolicy BOMPolicy
}
type utf32Encoding struct {
config
mib identifier.MIB
}
func (u utf32Encoding) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: &utf32Decoder{
initial: u.config,
current: u.config,
}}
}
func (u utf32Encoding) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: &utf32Encoder{
endianness: u.endianness,
initialBOMPolicy: u.bomPolicy,
currentBOMPolicy: u.bomPolicy,
}}
}
func (u utf32Encoding) ID() (mib identifier.MIB, other string) {
return u.mib, ""
}
func (u utf32Encoding) String() string {
e, b := "B", ""
if u.endianness == LittleEndian {
e = "L"
}
switch u.bomPolicy {
case ExpectBOM:
b = "Expect"
case UseBOM:
b = "Use"
case IgnoreBOM:
b = "Ignore"
}
return "UTF-32" + e + "E (" + b + " BOM)"
}
type utf32Decoder struct {
initial config
current config
}
func (u *utf32Decoder) Reset() {
u.current = u.initial
}
func (u *utf32Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if len(src) == 0 {
if atEOF && u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
return 0, 0, nil
}
if u.current.bomPolicy&acceptBOM != 0 {
if len(src) < 4 {
return 0, 0, transform.ErrShortSrc
}
switch {
case src[0] == 0x00 && src[1] == 0x00 && src[2] == 0xfe && src[3] == 0xff:
u.current.endianness = BigEndian
nSrc = 4
case src[0] == 0xff && src[1] == 0xfe && src[2] == 0x00 && src[3] == 0x00:
u.current.endianness = LittleEndian
nSrc = 4
default:
if u.current.bomPolicy&requireBOM != 0 {
return 0, 0, ErrMissingBOM
}
}
u.current.bomPolicy = IgnoreBOM
}
var r rune
var dSize, sSize int
for nSrc < len(src) {
if nSrc+3 < len(src) {
x := uint32(src[nSrc+0])<<24 | uint32(src[nSrc+1])<<16 |
uint32(src[nSrc+2])<<8 | uint32(src[nSrc+3])
if u.current.endianness == LittleEndian {
x = x>>24 | (x >> 8 & 0x0000FF00) | (x << 8 & 0x00FF0000) | x<<24
}
r, sSize = rune(x), 4
if dSize = utf8.RuneLen(r); dSize < 0 {
r, dSize = utf8.RuneError, 3
}
} else if atEOF {
// 1..3 trailing bytes.
r, dSize, sSize = utf8.RuneError, 3, len(src)-nSrc
} else {
err = transform.ErrShortSrc
break
}
if nDst+dSize > len(dst) {
err = transform.ErrShortDst
break
}
nDst += utf8.EncodeRune(dst[nDst:], r)
nSrc += sSize
}
return nDst, nSrc, err
}
type utf32Encoder struct {
endianness Endianness
initialBOMPolicy BOMPolicy
currentBOMPolicy BOMPolicy
}
func (u *utf32Encoder) Reset() {
u.currentBOMPolicy = u.initialBOMPolicy
}
func (u *utf32Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
if u.currentBOMPolicy&writeBOM != 0 {
if len(dst) < 4 {
return 0, 0, transform.ErrShortDst
}
dst[0], dst[1], dst[2], dst[3] = 0x00, 0x00, 0xfe, 0xff
u.currentBOMPolicy = IgnoreBOM
nDst = 4
}
r, size := rune(0), 0
for nSrc < len(src) {
r = rune(src[nSrc])
// Decode a 1-byte rune.
if r < utf8.RuneSelf {
size = 1
} else {
// Decode a multi-byte rune.
r, size = utf8.DecodeRune(src[nSrc:])
if size == 1 {
// All valid runes of size 1 (those below utf8.RuneSelf) were
// handled above. We have invalid UTF-8 or we haven't seen the
// full character yet.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
}
}
if nDst+4 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = uint8(r >> 24)
dst[nDst+1] = uint8(r >> 16)
dst[nDst+2] = uint8(r >> 8)
dst[nDst+3] = uint8(r)
nDst += 4
nSrc += size
}
if u.endianness == LittleEndian {
for i := 0; i < nDst; i += 4 {
dst[i], dst[i+1], dst[i+2], dst[i+3] = dst[i+3], dst[i+2], dst[i+1], dst[i]
}
}
return nDst, nSrc, err
}
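
As a quick, hedged illustration of the API defined above (a sketch, not part of the vendored code): round-tripping a string through UTF-32BE with a BOM, matching the byte sequences used in the tests that follow.

package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode/utf32"
	"golang.org/x/text/transform"
)

func main() {
	// UseBOM: the encoder writes a BOM and the decoder honours one if present.
	enc := utf32.UTF32(utf32.BigEndian, utf32.UseBOM)

	be, _, err := transform.Bytes(enc.NewEncoder(), []byte("W\u00e4\U0001d565"))
	if err != nil {
		panic(err)
	}
	// 00 00 fe ff 00 00 00 57 00 00 00 e4 00 01 d5 65
	fmt.Printf("% x\n", be)

	back, _, err := transform.Bytes(enc.NewDecoder(), be)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(back)) // W, ä, U+1D565
}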


@@ -1,248 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utf32
import (
"testing"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/internal/enctest"
"golang.org/x/text/transform"
)
var (
utf32LEIB = UTF32(LittleEndian, IgnoreBOM) // UTF-32LE (atypical interpretation)
utf32LEUB = UTF32(LittleEndian, UseBOM) // UTF-32, LE
// utf32LEEB = UTF32(LittleEndian, ExpectBOM) // UTF-32, LE, Expect - covered in encoding_test.go
utf32BEIB = UTF32(BigEndian, IgnoreBOM) // UTF-32BE (atypical interpretation)
utf32BEUB = UTF32(BigEndian, UseBOM) // UTF-32 default
utf32BEEB = UTF32(BigEndian, ExpectBOM) // UTF-32 Expect
)
func TestBasics(t *testing.T) {
testCases := []struct {
e encoding.Encoding
encPrefix string
encSuffix string
encoded string
utf8 string
}{{
e: utf32BEIB,
encoded: "\x00\x00\x00\x57\x00\x00\x00\xe4\x00\x01\xd5\x65",
utf8: "\x57\u00e4\U0001d565",
}, {
e: UTF32(BigEndian, ExpectBOM),
encPrefix: "\x00\x00\xfe\xff",
encoded: "\x00\x00\x00\x57\x00\x00\x00\xe4\x00\x01\xd5\x65",
utf8: "\x57\u00e4\U0001d565",
}, {
e: UTF32(LittleEndian, IgnoreBOM),
encoded: "\x57\x00\x00\x00\xe4\x00\x00\x00\x65\xd5\x01\x00",
utf8: "\x57\u00e4\U0001d565",
}, {
e: UTF32(LittleEndian, ExpectBOM),
encPrefix: "\xff\xfe\x00\x00",
encoded: "\x57\x00\x00\x00\xe4\x00\x00\x00\x65\xd5\x01\x00",
utf8: "\x57\u00e4\U0001d565",
}}
for _, tc := range testCases {
enctest.TestEncoding(t, tc.e, tc.encoded, tc.utf8, tc.encPrefix, tc.encSuffix)
}
}
func TestFiles(t *testing.T) { enctest.TestFile(t, utf32BEIB) }
func BenchmarkEncoding(b *testing.B) { enctest.Benchmark(b, utf32BEIB) }
func TestUTF32(t *testing.T) {
testCases := []struct {
desc string
src string
notEOF bool // the inverse of atEOF
sizeDst int
want string
nSrc int
err error
t transform.Transformer
}{{
desc: "utf-32 IgnoreBOM dec: empty string",
t: utf32BEIB.NewDecoder(),
}, {
desc: "utf-32 UseBOM dec: empty string",
t: utf32BEUB.NewDecoder(),
}, {
desc: "utf-32 ExpectBOM dec: empty string",
err: ErrMissingBOM,
t: utf32BEEB.NewDecoder(),
}, {
desc: "utf-32be dec: Doesn't interpret U+FEFF as BOM",
src: "\x00\x00\xFE\xFF\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\uFEFF\U00012345=Ra",
nSrc: 20,
t: utf32BEIB.NewDecoder(),
}, {
desc: "utf-32be dec: Interprets little endian U+FEFF as invalid",
src: "\xFF\xFE\x00\x00\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\uFFFD\U00012345=Ra",
nSrc: 20,
t: utf32BEIB.NewDecoder(),
}, {
desc: "utf-32le dec: Doesn't interpret U+FEFF as BOM",
src: "\xFF\xFE\x00\x00\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\uFEFF\U00012345=Ra",
nSrc: 20,
t: utf32LEIB.NewDecoder(),
}, {
desc: "utf-32le dec: Interprets big endian U+FEFF as invalid",
src: "\x00\x00\xFE\xFF\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\uFFFD\U00012345=Ra",
nSrc: 20,
t: utf32LEIB.NewDecoder(),
}, {
desc: "utf-32 enc: Writes big-endian BOM",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\x00\x00\xFE\xFF\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
nSrc: 7,
t: utf32BEUB.NewEncoder(),
}, {
desc: "utf-32 enc: Writes little-endian BOM",
src: "\U00012345=Ra",
sizeDst: 100,
want: "\xFF\xFE\x00\x00\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
nSrc: 7,
t: utf32LEUB.NewEncoder(),
}, {
desc: "utf-32 dec: Interprets text using big-endian default when BOM not present",
src: "\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 16,
t: utf32BEUB.NewDecoder(),
}, {
desc: "utf-32 dec: Interprets text using little-endian default when BOM not present",
src: "\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 16,
t: utf32LEUB.NewDecoder(),
}, {
desc: "utf-32 dec: BOM determines encoding BE",
src: "\x00\x00\xFE\xFF\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 20,
t: utf32BEUB.NewDecoder(),
}, {
desc: "utf-32 dec: BOM determines encoding LE",
src: "\xFF\xFE\x00\x00\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 20,
t: utf32LEUB.NewDecoder(),
}, {
desc: "utf-32 dec: BOM determines encoding LE, change default",
src: "\xFF\xFE\x00\x00\x45\x23\x01\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 20,
t: utf32BEUB.NewDecoder(),
}, {
desc: "utf-32 dec: BOM determines encoding BE, change default",
src: "\x00\x00\xFE\xFF\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\U00012345=Ra",
nSrc: 20,
t: utf32LEUB.NewDecoder(),
}, {
desc: "utf-32 dec: Don't change big-endian byte order mid-stream",
src: "\x00\x01\x23\x45\x00\x00\x00\x3D\xFF\xFE\x00\x00\x00\x00\xFE\xFF\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "\U00012345=\uFFFD\uFEFFRa",
nSrc: 24,
t: utf32BEUB.NewDecoder(),
}, {
desc: "utf-32 dec: Don't change little-endian byte order mid-stream",
src: "\x45\x23\x01\x00\x3D\x00\x00\x00\x00\x00\xFE\xFF\xFF\xFE\x00\x00\x52\x00\x00\x00\x61\x00\x00\x00",
sizeDst: 100,
want: "\U00012345=\uFFFD\uFEFFRa",
nSrc: 24,
t: utf32LEUB.NewDecoder(),
}, {
desc: "utf-32 dec: Fail on missing BOM when required",
src: "\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
sizeDst: 100,
want: "",
nSrc: 0,
err: ErrMissingBOM,
t: utf32BEEB.NewDecoder(),
}, {
desc: "utf-32 enc: Short dst",
src: "\U00012345=Ra",
sizeDst: 15,
want: "\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52",
nSrc: 6,
err: transform.ErrShortDst,
t: utf32BEIB.NewEncoder(),
}, {
desc: "utf-32 enc: Short src",
src: "\U00012345=Ra\xC2",
notEOF: true,
sizeDst: 100,
want: "\x00\x01\x23\x45\x00\x00\x00\x3D\x00\x00\x00\x52\x00\x00\x00\x61",
nSrc: 7,
err: transform.ErrShortSrc,
t: utf32BEIB.NewEncoder(),
}, {
desc: "utf-32 enc: Invalid input",
src: "\x80\xC1\xC2\x7F\xC2",
sizeDst: 100,
want: "\x00\x00\xFF\xFD\x00\x00\xFF\xFD\x00\x00\xFF\xFD\x00\x00\x00\x7F\x00\x00\xFF\xFD",
nSrc: 5,
t: utf32BEIB.NewEncoder(),
}, {
desc: "utf-32 dec: Short dst",
src: "\x00\x00\x00\x41",
sizeDst: 0,
want: "",
nSrc: 0,
err: transform.ErrShortDst,
t: utf32BEIB.NewDecoder(),
}, {
desc: "utf-32 dec: Short src",
src: "\x00\x00\x00",
notEOF: true,
sizeDst: 4,
want: "",
nSrc: 0,
err: transform.ErrShortSrc,
t: utf32BEIB.NewDecoder(),
}, {
desc: "utf-32 dec: Invalid input",
src: "\x00\x00\xD8\x00\x00\x00\xDF\xFF\x00\x11\x00\x00\x00\x00\x00",
sizeDst: 100,
want: "\uFFFD\uFFFD\uFFFD\uFFFD",
nSrc: 15,
t: utf32BEIB.NewDecoder(),
}}
for i, tc := range testCases {
b := make([]byte, tc.sizeDst)
nDst, nSrc, err := tc.t.Transform(b, []byte(tc.src), !tc.notEOF)
if err != tc.err {
t.Errorf("%d:%s: error was %v; want %v", i, tc.desc, err, tc.err)
}
if got := string(b[:nDst]); got != tc.want {
t.Errorf("%d:%s: result was %q: want %q", i, tc.desc, got, tc.want)
}
if nSrc != tc.nSrc {
t.Errorf("%d:%s: nSrc was %d; want %d", i, tc.desc, nSrc, tc.nSrc)
}
}
}
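
Finally, a short hedged sketch (again not part of the vendored code) of the ExpectBOM error path that the "Fail on missing BOM when required" case above exercises:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/text/encoding/unicode/utf32"
	"golang.org/x/text/transform"
)

func main() {
	// ExpectBOM requires the input to start with a byte order mark.
	dec := utf32.UTF32(utf32.BigEndian, utf32.ExpectBOM).NewDecoder()

	// Well-formed UTF-32BE ("A"), but with no BOM in front of it.
	_, _, err := transform.Bytes(dec, []byte("\x00\x00\x00\x41"))
	if errors.Is(err, utf32.ErrMissingBOM) {
		fmt.Println("decode failed:", err) // encoding: missing byte order mark
	}
}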