Switch to using the dep tool and update all the dependencies

This commit is contained in:
Nick Craig-Wood 2017-05-11 15:39:54 +01:00
parent 5135ff73cb
commit 98c2d2c41b
5321 changed files with 4483201 additions and 5922 deletions

vendor/golang.org/x/text/unicode/bidi/bidi.go generated vendored Normal file

@ -0,0 +1,198 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go gen_trieval.go gen_ranges.go
// Package bidi contains functionality for bidirectional text support.
//
// See http://www.unicode.org/reports/tr9.
//
// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways
// and without notice.
package bidi // import "golang.org/x/text/unicode/bidi"
// TODO:
// The following functionality would not be hard to implement, but hinges on
// the definition of a Segmenter interface. For now this is up to the user.
// - Iterate over paragraphs
// - Segmenter to iterate over runs directly from a given text.
// Also:
// - Transformer for reordering?
// - Transformer (validator, really) for Bidi Rule.
// This API tries to avoid dealing with embedding levels for now. Under the hood
// these will be computed, but the question is to which extent the user should
// know they exist. We should at some point allow the user to specify an
// embedding hierarchy, though.
// A Direction indicates the overall flow of text.
type Direction int
const (
// LeftToRight indicates the text contains no right-to-left characters and
// that either there are some left-to-right characters or the option
// DefaultDirection(LeftToRight) was passed.
LeftToRight Direction = iota
// RightToLeft indicates the text contains no left-to-right characters and
// that either there are some right-to-left characters or the option
// DefaultDirection(RightToLeft) was passed.
RightToLeft
// Mixed indicates text contains both left-to-right and right-to-left
// characters.
Mixed
// Neutral means that text contains no left-to-right and right-to-left
// characters and that no default direction has been set.
Neutral
)
type options struct{}
// An Option is an option for Bidi processing.
type Option func(*options)
// ICU allows the user to define embedding levels. This may be used, for example,
// to use the hierarchical structure of markup languages to define embeddings.
// The following option may be a way to expose this functionality in this API.
// // LevelFunc sets a function that associates nesting levels with the given text.
// // The levels function will be called with monotonically increasing values for p.
// func LevelFunc(levels func(p int) int) Option {
// panic("unimplemented")
// }
// DefaultDirection sets the default direction for a Paragraph. The direction is
// overridden if the text contains directional characters.
func DefaultDirection(d Direction) Option {
panic("unimplemented")
}
// A Paragraph holds a single Paragraph for Bidi processing.
type Paragraph struct {
// buffers
}
// SetBytes configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If b contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from b including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
panic("unimplemented")
}
// SetString configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If s contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from s including this separator. Error may be non-nil if options are
// given.
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
panic("unimplemented")
}
// IsLeftToRight reports whether the principal direction of rendering for this
// paragraph is left-to-right. If this returns false, the principal direction
// of rendering is right-to-left.
func (p *Paragraph) IsLeftToRight() bool {
panic("unimplemented")
}
// Direction returns the direction of the text of this paragraph.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (p *Paragraph) Direction() Direction {
panic("unimplemented")
}
// RunAt reports the Run at the given position of the input text.
//
// This method can be used for computing line breaks on paragraphs.
func (p *Paragraph) RunAt(pos int) Run {
panic("unimplemented")
}
// Order computes the visual ordering of all the runs in a Paragraph.
func (p *Paragraph) Order() (Ordering, error) {
panic("unimplemented")
}
// Line computes the visual ordering of runs for a single line starting and
// ending at the given positions in the original text.
func (p *Paragraph) Line(start, end int) (Ordering, error) {
panic("unimplemented")
}
// An Ordering holds the computed visual order of runs of a Paragraph. Calling
// SetBytes or SetString on the originating Paragraph invalidates an Ordering.
// The methods of an Ordering should only be called by one goroutine at a time.
type Ordering struct{}
// Direction reports the directionality of the runs.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
func (o *Ordering) Direction() Direction {
panic("unimplemented")
}
// NumRuns returns the number of runs.
func (o *Ordering) NumRuns() int {
panic("unimplemented")
}
// Run returns the ith run within the ordering.
func (o *Ordering) Run(i int) Run {
panic("unimplemented")
}
// TODO: perhaps with options.
// // Reorder creates a reader that reads the runes in visual order per character.
// // Modifiers remain after the runes they modify.
// func (l *Runs) Reorder() io.Reader {
// panic("unimplemented")
// }
// A Run is a continuous sequence of characters of a single direction.
type Run struct {
}
// String returns the text of the run in its original order.
func (r *Run) String() string {
panic("unimplemented")
}
// Bytes returns the text of the run in its original order.
func (r *Run) Bytes() []byte {
panic("unimplemented")
}
// TODO: methods for
// - Display order
// - headers and footers
// - bracket replacement.
// Direction reports the direction of the run.
func (r *Run) Direction() Direction {
panic("unimplemented")
}
// Pos returns the position of the Run within the text passed to SetBytes or SetString of the
// originating Paragraph value.
func (r *Run) Pos() (start, end int) {
panic("unimplemented")
}
// AppendReverse reverses the order of characters of in, appends them to out,
// and returns the result. Modifiers will still follow the runes they modify.
// Brackets are replaced with their counterparts.
func AppendReverse(out, in []byte) []byte {
panic("unimplemented")
}
// ReverseString reverses the order of characters in s and returns a new string.
// Modifiers will still follow the runes they modify. Brackets are replaced with
// their counterparts.
func ReverseString(s string) string {
panic("unimplemented")
}
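The API above is declared but unimplemented in this snapshot (every method panics). As a rough sketch of the call pattern the doc comments describe, assuming a completed implementation, a caller would configure a Paragraph, compute an Ordering, and walk its Runs:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	var p bidi.Paragraph
	// SetString processes at most one paragraph and reports the bytes consumed.
	if _, err := p.SetString("abc \u05d0\u05d1\u05d2 def"); err != nil {
		panic(err)
	}
	ordering, err := p.Order() // visual order of all runs in the paragraph
	if err != nil {
		panic(err)
	}
	for i := 0; i < ordering.NumRuns(); i++ {
		run := ordering.Run(i)
		fmt.Println(run.Direction(), run.String())
	}
}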

vendor/golang.org/x/text/unicode/bidi/bracket.go generated vendored Normal file

@ -0,0 +1,335 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import (
"container/list"
"fmt"
"sort"
)
// This file contains a port of the reference implementation of the
// Bidi Parentheses Algorithm:
// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
//
// The implementation in this file covers definitions BD14-BD16 and rule N0
// of UAX#9.
//
// Some preprocessing is done for each rune before data is passed to this
// algorithm:
// - opening and closing brackets are identified
// - a bracket pair type, like '(' and ')', is assigned a unique identifier that
// is identical for the opening and closing bracket. It is left to the caller
// to do these mappings.
// - The BPA algorithm requires that bracket characters that are canonical
// equivalents of each other be able to be substituted for each other.
// It is the responsibility of the caller to do this canonicalization.
//
// In implementing BD16, this implementation departs slightly from the "logical"
// algorithm defined in UAX#9. In particular, the stack referenced there
// supports operations that go beyond a "basic" stack. An equivalent
// implementation based on a linked list is used here.
// Bidi_Paired_Bracket_Type
// BD14. An opening paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Open.
//
// BD15. A closing paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Close.
type bracketType byte
const (
bpNone bracketType = iota
bpOpen
bpClose
)
// bracketPair holds a pair of index values for opening and closing bracket
// location of a bracket pair.
type bracketPair struct {
opener int
closer int
}
func (b *bracketPair) String() string {
return fmt.Sprintf("(%v, %v)", b.opener, b.closer)
}
// bracketPairs is a slice of bracketPairs with a sort.Interface implementation.
type bracketPairs []bracketPair
func (b bracketPairs) Len() int { return len(b) }
func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener }
// resolvePairedBrackets runs the paired bracket part of the UBA algorithm.
//
// For each rune, it takes the indexes into the original string, the class, the
// bracket type (in pairTypes), and the bracket identifier (pairValues). It also
// takes the direction type for the start of the sequence and the embedding level.
//
// The identifiers for bracket types are the rune of the canonicalized opening
// bracket for brackets (open or close) or 0 for runes that are not brackets.
func resolvePairedBrackets(s *isolatingRunSequence) {
p := bracketPairer{
sos: s.sos,
openers: list.New(),
codesIsolatedRun: s.types,
indexes: s.indexes,
}
dirEmbed := L
if s.level&1 != 0 {
dirEmbed = R
}
p.locateBrackets(s.p.pairTypes, s.p.pairValues)
p.resolveBrackets(dirEmbed, s.p.initialTypes)
}
type bracketPairer struct {
sos Class // direction corresponding to start of sequence
// The following is a restatement of BD 16 using non-algorithmic language.
//
// A bracket pair is a pair of characters consisting of an opening
// paired bracket and a closing paired bracket such that the
// Bidi_Paired_Bracket property value of the former equals the latter,
// subject to the following constraints.
// - both characters of a pair occur in the same isolating run sequence
// - the closing character of a pair follows the opening character
// - any bracket character can belong at most to one pair, the earliest possible one
// - any bracket character not part of a pair is treated like an ordinary character
// - pairs may nest properly, but their spans may not overlap otherwise
// Bracket characters with canonical decompositions are supposed to be
// treated as if they had been normalized, to allow normalized and non-
// normalized text to give the same result. In this implementation that step
// is pushed out to the caller. The caller has to ensure that the pairValue
// slices contain the rune of the opening bracket after normalization for
// any opening or closing bracket.
openers *list.List // list of positions for opening brackets
// bracket pair positions sorted by location of opening bracket
pairPositions bracketPairs
codesIsolatedRun []Class // directional bidi codes for an isolated run
indexes []int // array of index values into the original string
}
// matchOpener reports whether characters at given positions form a matching
// bracket pair.
func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool {
return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
}
const maxPairingDepth = 63
// locateBrackets locates matching bracket pairs according to BD16.
//
// This implementation uses a linked list instead of a stack, because, while
// elements are added at the front (like a push) they are not generally removed
// in atomic 'pop' operations, reducing the benefit of the stack archetype.
func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) {
// traverse the run
// do that explicitly (not in a for-each) so we can record position
for i, index := range p.indexes {
// look at the bracket type for each character
if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
// continue scanning
continue
}
switch pairTypes[index] {
case bpOpen:
// check if maximum pairing depth reached
if p.openers.Len() == maxPairingDepth {
p.openers.Init()
return
}
// remember opener location, most recent first
p.openers.PushFront(i)
case bpClose:
// see if there is a match
count := 0
for elem := p.openers.Front(); elem != nil; elem = elem.Next() {
count++
opener := elem.Value.(int)
if p.matchOpener(pairValues, opener, i) {
// if the opener matches, add nested pair to the ordered list
p.pairPositions = append(p.pairPositions, bracketPair{opener, i})
// remove up to and including matched opener
for ; count > 0; count-- {
p.openers.Remove(p.openers.Front())
}
break
}
}
sort.Sort(p.pairPositions)
// if we get here, the closing bracket matched no openers
// and gets ignored
}
}
}
// Bracket pairs within an isolating run sequence are processed as units so
// that both the opening and the closing paired bracket in a pair resolve to
// the same direction.
//
// N0. Process bracket pairs in an isolating run sequence sequentially in
// the logical order of the text positions of the opening paired brackets
// using the logic given below. Within this scope, bidirectional types EN
// and AN are treated as R.
//
// Identify the bracket pairs in the current isolating run sequence
// according to BD16. For each bracket-pair element in the list of pairs of
// text positions:
//
// a Inspect the bidirectional types of the characters enclosed within the
// bracket pair.
//
// b If any strong type (either L or R) matching the embedding direction is
// found, set the type for both brackets in the pair to match the embedding
// direction.
//
// o [ e ] o -> o e e e o
//
// o [ o e ] -> o e o e e
//
// o [ NI e ] -> o e NI e e
//
// c Otherwise, if a strong type (opposite the embedding direction) is
// found, test for adjacent strong types as follows: 1 First, check
// backwards before the opening paired bracket until the first strong type
// (L, R, or sos) is found. If that first preceding strong type is opposite
// the embedding direction, then set the type for both brackets in the pair
// to that type. 2 Otherwise, set the type for both brackets in the pair to
// the embedding direction.
//
// o [ o ] e -> o o o o e
//
// o [ o NI ] o -> o o o NI o o
//
// e [ o ] o -> e e o e o
//
// e [ o ] e -> e e o e e
//
// e ( o [ o ] NI ) e -> e e o o o o NI e e
//
// d Otherwise, do not set the type for the current bracket pair. Note that
// if the enclosed text contains no strong types the paired brackets will
// both resolve to the same level when resolved individually using rules N1
// and N2.
//
// e ( NI ) o -> e ( NI ) o
// getStrongTypeN0 maps character's directional code to strong type as required
// by rule N0.
//
// TODO: have separate type for "strong" directionality.
func (p *bracketPairer) getStrongTypeN0(index int) Class {
switch p.codesIsolatedRun[index] {
// in the scope of N0, number types are treated as R
case EN, AN, AL, R:
return R
case L:
return L
default:
return ON
}
}
// classifyPairContent reports the strong types contained inside a Bracket Pair,
// assuming the given embedding direction.
//
// It returns ON if no strong type is found. If a single strong type is found,
// it returns this type. Otherwise it returns the embedding direction.
//
// TODO: use separate type for "strong" directionality.
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
dirOpposite := ON
for i := loc.opener + 1; i < loc.closer; i++ {
dir := p.getStrongTypeN0(i)
if dir == ON {
continue
}
if dir == dirEmbed {
return dir // type matching embedding direction found
}
dirOpposite = dir
}
// return ON if no strong type found, or class opposite to dirEmbed
return dirOpposite
}
// classBeforePair determines which strong types are present before a Bracket
// Pair. It returns R or L if a strong type is found, otherwise sos.
func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
for i := loc.opener - 1; i >= 0; i-- {
if dir := p.getStrongTypeN0(i); dir != ON {
return dir
}
}
// no strong types found, return sos
return p.sos
}
// assignBracketType implements rule N0 for a single bracket pair.
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
// rule "N0, a", inspect contents of pair
dirPair := p.classifyPairContent(loc, dirEmbed)
// dirPair is now L, R, or N (no strong type found)
// the following logical tests are performed out of order compared to
// the statement of the rules but yield the same results
if dirPair == ON {
return // case "d" - nothing to do
}
if dirPair != dirEmbed {
// case "c": strong type found, opposite - check before (c.1)
dirPair = p.classBeforePair(loc)
if dirPair == dirEmbed || dirPair == ON {
// no strong opposite type found before - use embedding (c.2)
dirPair = dirEmbed
}
}
// else: case "b", strong type found matching embedding,
// no explicit action needed, as dirPair is already set to embedding
// direction
// set the bracket types to the type found
p.setBracketsToType(loc, dirPair, initialTypes)
}
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
p.codesIsolatedRun[loc.opener] = dirPair
p.codesIsolatedRun[loc.closer] = dirPair
for i := loc.opener + 1; i < loc.closer; i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
for i := loc.closer + 1; i < len(p.indexes); i++ {
index := p.indexes[i]
if initialTypes[index] != NSM {
break
}
p.codesIsolatedRun[i] = dirPair
}
}
// resolveBrackets implements rule N0 for a list of pairs.
func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
for _, loc := range p.pairPositions {
p.assignBracketType(loc, dirEmbed, initialTypes)
}
}

vendor/golang.org/x/text/unicode/bidi/core.go generated vendored Normal file

File diff suppressed because it is too large

vendor/golang.org/x/text/unicode/bidi/core_test.go generated vendored Normal file

@ -0,0 +1,224 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import (
"flag"
"fmt"
"log"
"strconv"
"strings"
"testing"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/norm"
)
var testLevels = flag.Bool("levels", false, "enable testing of levels")
// TestBidiCore performs the tests in BidiTest.txt.
// See http://www.unicode.org/Public/UCD/latest/ucd/BidiTest.txt.
func TestBidiCore(t *testing.T) {
testtext.SkipIfNotLong(t)
r := gen.OpenUCDFile("BidiTest.txt")
defer r.Close()
var wantLevels, wantOrder []string
p := ucd.New(r, ucd.Part(func(p *ucd.Parser) {
s := strings.Split(p.String(0), ":")
switch s[0] {
case "Levels":
wantLevels = strings.Fields(s[1])
case "Reorder":
wantOrder = strings.Fields(s[1])
default:
log.Fatalf("Unknown part %q.", s[0])
}
}))
for p.Next() {
types := []Class{}
for _, s := range p.Strings(0) {
types = append(types, bidiClass[s])
}
// We ignore the bracketing part of the algorithm.
pairTypes := make([]bracketType, len(types))
pairValues := make([]rune, len(types))
for i := uint(0); i < 3; i++ {
if p.Uint(1)&(1<<i) == 0 {
continue
}
lev := level(int(i) - 1)
par := newParagraph(types, pairTypes, pairValues, lev)
if *testLevels {
levels := par.getLevels([]int{len(types)})
for i, s := range wantLevels {
if s == "x" {
continue
}
l, _ := strconv.ParseUint(s, 10, 8)
if level(l)&1 != levels[i]&1 {
t.Errorf("%s:%d:levels: got %v; want %v", p.String(0), lev, levels, wantLevels)
break
}
}
}
order := par.getReordering([]int{len(types)})
gotOrder := filterOrder(types, order)
if got, want := fmt.Sprint(gotOrder), fmt.Sprint(wantOrder); got != want {
t.Errorf("%s:%d:order: got %v; want %v\noriginal %v", p.String(0), lev, got, want, order)
}
}
}
if err := p.Err(); err != nil {
log.Fatal(err)
}
}
var removeClasses = map[Class]bool{
LRO: true,
RLO: true,
RLE: true,
LRE: true,
PDF: true,
BN: true,
}
// TestBidiCharacters performs the tests in BidiCharacterTest.txt.
// See http://www.unicode.org/Public/UCD/latest/ucd/BidiCharacterTest.txt
func TestBidiCharacters(t *testing.T) {
testtext.SkipIfNotLong(t)
ucd.Parse(gen.OpenUCDFile("BidiCharacterTest.txt"), func(p *ucd.Parser) {
var (
types []Class
pairTypes []bracketType
pairValues []rune
parLevel level
wantLevel = level(p.Int(2))
wantLevels = p.Strings(3)
wantVisualOrder = p.Strings(4)
)
switch l := p.Int(1); l {
case 0, 1:
parLevel = level(l)
case 2:
parLevel = implicitLevel
default:
// Spec says to ignore unknown parts.
}
runes := p.Runes(0)
for _, r := range runes {
// Assign the bracket type.
if d := norm.NFKD.PropertiesString(string(r)).Decomposition(); d != nil {
r = []rune(string(d))[0]
}
p, _ := LookupRune(r)
// Assign the class for this rune.
types = append(types, p.Class())
switch {
case !p.IsBracket():
pairTypes = append(pairTypes, bpNone)
pairValues = append(pairValues, 0)
case p.IsOpeningBracket():
pairTypes = append(pairTypes, bpOpen)
pairValues = append(pairValues, r)
default:
pairTypes = append(pairTypes, bpClose)
pairValues = append(pairValues, p.reverseBracket(r))
}
}
par := newParagraph(types, pairTypes, pairValues, parLevel)
// Test results:
if got := par.embeddingLevel; got != wantLevel {
t.Errorf("%v:level: got %d; want %d", string(runes), got, wantLevel)
}
if *testLevels {
gotLevels := getLevelStrings(types, par.getLevels([]int{len(types)}))
if got, want := fmt.Sprint(gotLevels), fmt.Sprint(wantLevels); got != want {
t.Errorf("%04X %q:%d: got %v; want %v\nval: %x\npair: %v", runes, string(runes), parLevel, got, want, pairValues, pairTypes)
}
}
order := par.getReordering([]int{len(types)})
order = filterOrder(types, order)
if got, want := fmt.Sprint(order), fmt.Sprint(wantVisualOrder); got != want {
t.Errorf("%04X %q:%d: got %v; want %v\ngot order: %s", runes, string(runes), parLevel, got, want, reorder(runes, order))
}
})
}
func getLevelStrings(cl []Class, levels []level) []string {
var results []string
for i, l := range levels {
if !removeClasses[cl[i]] {
results = append(results, fmt.Sprint(l))
} else {
results = append(results, "x")
}
}
return results
}
func filterOrder(cl []Class, order []int) []int {
no := []int{}
for _, o := range order {
if !removeClasses[cl[o]] {
no = append(no, o)
}
}
return no
}
func reorder(r []rune, order []int) string {
nr := make([]rune, len(order))
for i, o := range order {
nr[i] = r[o]
}
return string(nr)
}
// bidiClass names and codes taken from class "bc" in
// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
"AL": AL, // classArabicLetter,
"AN": AN, // classArabicNumber,
"B": B, // classParagraphSeparator,
"BN": BN, // classBoundaryNeutral,
"CS": CS, // classCommonSeparator,
"EN": EN, // classEuropeanNumber,
"ES": ES, // classEuropeanSeparator,
"ET": ET, // classEuropeanTerminator,
"L": L, // classLeftToRight,
"NSM": NSM, // classNonspacingMark,
"ON": ON, // classOtherNeutral,
"R": R, // classRightToLeft,
"S": S, // classSegmentSeparator,
"WS": WS, // classWhiteSpace,
"LRO": LRO, // classLeftToRightOverride,
"RLO": RLO, // classRightToLeftOverride,
"LRE": LRE, // classLeftToRightEmbedding,
"RLE": RLE, // classRightToLeftEmbedding,
"PDF": PDF, // classPopDirectionalFormat,
"LRI": LRI, // classLeftToRightIsolate,
"RLI": RLI, // classRightToLeftIsolate,
"FSI": FSI, // classFirstStrongIsolate,
"PDI": PDI, // classPopDirectionalIsolate,
}

vendor/golang.org/x/text/unicode/bidi/gen.go generated vendored Normal file

@ -0,0 +1,133 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"flag"
"log"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/triegen"
"golang.org/x/text/internal/ucd"
)
var outputFile = flag.String("out", "tables.go", "output file")
func main() {
gen.Init()
gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")
genTables()
}
// bidiClass names and codes taken from class "bc" in
// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
var bidiClass = map[string]Class{
"AL": AL, // ArabicLetter
"AN": AN, // ArabicNumber
"B": B, // ParagraphSeparator
"BN": BN, // BoundaryNeutral
"CS": CS, // CommonSeparator
"EN": EN, // EuropeanNumber
"ES": ES, // EuropeanSeparator
"ET": ET, // EuropeanTerminator
"L": L, // LeftToRight
"NSM": NSM, // NonspacingMark
"ON": ON, // OtherNeutral
"R": R, // RightToLeft
"S": S, // SegmentSeparator
"WS": WS, // WhiteSpace
"FSI": Control,
"PDF": Control,
"PDI": Control,
"LRE": Control,
"LRI": Control,
"LRO": Control,
"RLE": Control,
"RLI": Control,
"RLO": Control,
}
func genTables() {
if numClass > 0x0F {
log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
}
w := gen.NewCodeWriter()
defer w.WriteGoFile(*outputFile, "bidi")
gen.WriteUnicodeVersion(w)
t := triegen.NewTrie("bidi")
// Build data about bracket mapping. These bits need to be or-ed with
// any other bits.
orMask := map[rune]uint64{}
xorMap := map[rune]int{}
xorMasks := []rune{0} // First value is no-op.
ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
r1 := p.Rune(0)
r2 := p.Rune(1)
xor := r1 ^ r2
if _, ok := xorMap[xor]; !ok {
xorMap[xor] = len(xorMasks)
xorMasks = append(xorMasks, xor)
}
entry := uint64(xorMap[xor]) << xorMaskShift
switch p.String(2) {
case "o":
entry |= openMask
case "c", "n":
default:
log.Fatalf("Unknown bracket class %q.", p.String(2))
}
orMask[r1] = entry
})
w.WriteComment(`
xorMasks contains masks to be xor-ed with brackets to get the reverse
version.`)
w.WriteVar("xorMasks", xorMasks)
done := map[rune]bool{}
insert := func(r rune, c Class) {
if !done[r] {
t.Insert(r, orMask[r]|uint64(c))
done[r] = true
}
}
// Insert the derived BiDi properties.
ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
r := p.Rune(0)
class, ok := bidiClass[p.String(1)]
if !ok {
log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
}
insert(r, class)
})
visitDefaults(insert)
// TODO: use sparse blocks. This would reduce table size considerably
// from the looks of it.
sz, err := t.Gen(w)
if err != nil {
log.Fatal(err)
}
w.Size += sz
}
// Dummy values to make the methods in gen_common compile. The real versions
// are generated by this file into tables.go.
var (
xorMasks []rune
)
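The bracket data built above relies on a simple property of paired-bracket runes: xoring the opening rune with the closing rune gives a small mask, and xoring either rune with that mask yields its counterpart (this is what Properties.reverseBracket in prop.go exploits via the generated xorMasks table). A minimal standalone sketch of the idea with a few example pairs:

package main

import "fmt"

func main() {
	pairs := [][2]rune{{'(', ')'}, {'[', ']'}, {'{', '}'}}
	for _, p := range pairs {
		mask := p[0] ^ p[1] // genTables records this mask, indexed through xorMap/xorMasks
		fmt.Printf("%q^%q = %#x, %q^%#x = %q\n", p[0], p[1], mask, p[0], mask, p[0]^mask)
	}
}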

vendor/golang.org/x/text/unicode/bidi/gen_ranges.go generated vendored Normal file

@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
// These tables are hand-extracted from:
// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
// first write default values for ranges listed above.
visitRunes(fn, AL, []rune{
0x0600, 0x07BF, // Arabic
0x08A0, 0x08FF, // Arabic Extended-A
0xFB50, 0xFDCF, // Arabic Presentation Forms
0xFDF0, 0xFDFF,
0xFE70, 0xFEFF,
0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
})
visitRunes(fn, R, []rune{
0x0590, 0x05FF, // Hebrew
0x07C0, 0x089F, // Nko et al.
0xFB1D, 0xFB4F,
0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
0x0001E800, 0x0001EDFF,
0x0001EF00, 0x0001EFFF,
})
visitRunes(fn, ET, []rune{ // European Terminator
0x20A0, 0x20CF, // Currency symbols
})
rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
fn(r, BN) // Boundary Neutral
})
ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
if p.String(1) == "Default_Ignorable_Code_Point" {
fn(p.Rune(0), BN) // Boundary Neutral
}
})
}
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
for i := 0; i < len(runes); i += 2 {
lo, hi := runes[i], runes[i+1]
for j := lo; j <= hi; j++ {
fn(j, c)
}
}
}

vendor/golang.org/x/text/unicode/bidi/gen_trieval.go generated vendored Normal file

@ -0,0 +1,64 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
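Given that bit layout, a trie entry byte can be unpacked with a shift and two masks. A small sketch using a hypothetical entry value (not taken from the generated tables):

package main

import "fmt"

const (
	openMask     = 0x10 // bit 4: bracket open
	xorMaskShift = 5    // bits 7..5: index into xorMasks
)

func main() {
	var entry uint8 = 0x3A // hypothetical: xor index 1, open bracket, class 0xA
	class := entry & 0x0F
	isOpen := entry&openMask != 0
	xorIndex := entry >> xorMaskShift
	fmt.Printf("class=%#x open=%v xorIndex=%d\n", class, isOpen, xorIndex)
}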

vendor/golang.org/x/text/unicode/bidi/prop.go generated vendored Normal file

@ -0,0 +1,206 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import "unicode/utf8"
// Properties provides access to BiDi properties of runes.
type Properties struct {
entry uint8
last uint8
}
var trie = newBidiTrie(0)
// TODO: using this for bidirule reduces the running time by about 5%. Consider
// if this is worth exposing or if we can find a way to speed up the Class
// method.
//
// // CompactClass is like Class, but maps all of the BiDi control classes
// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control.
// func (p Properties) CompactClass() Class {
// return Class(p.entry & 0x0F)
// }
// Class returns the Bidi class for p.
func (p Properties) Class() Class {
c := Class(p.entry & 0x0F)
if c == Control {
c = controlByteToClass[p.last&0xF]
}
return c
}
// IsBracket reports whether the rune is a bracket.
func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 }
// IsOpeningBracket reports whether the rune is an opening bracket.
// IsBracket must return true for the result to be meaningful.
func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 }
// TODO: find a better API and expose.
func (p Properties) reverseBracket(r rune) rune {
return xorMasks[p.entry>>xorMaskShift] ^ r
}
var controlByteToClass = [16]Class{
0xD: LRO, // U+202D LeftToRightOverride,
0xE: RLO, // U+202E RightToLeftOverride,
0xA: LRE, // U+202A LeftToRightEmbedding,
0xB: RLE, // U+202B RightToLeftEmbedding,
0xC: PDF, // U+202C PopDirectionalFormat,
0x6: LRI, // U+2066 LeftToRightIsolate,
0x7: RLI, // U+2067 RightToLeftIsolate,
0x8: FSI, // U+2068 FirstStrongIsolate,
0x9: PDI, // U+2069 PopDirectionalIsolate,
}
// LookupRune returns properties for r.
func LookupRune(r rune) (p Properties, size int) {
var buf [4]byte
n := utf8.EncodeRune(buf[:], r)
return Lookup(buf[:n])
}
// TODO: these lookup methods are based on the generated trie code. The returned
// sizes have slightly different semantics from the generated code, in that they
// always return size==1 for an illegal UTF-8 byte (instead of the length
// of the maximum invalid subsequence). Most Transformers, like unicode/norm,
// leave invalid UTF-8 untouched, in which case it has performance benefits to
// do so (without changing the semantics). Bidi requires the semantics used here
// for the bidirule implementation to be compatible with the Go semantics.
// They ultimately should perhaps be adopted by all trie implementations, for
// convenience sake.
// This unrolled code also boosts performance of the secure/bidirule package by
// about 30%.
// So, to remove this code:
// - add option to trie generator to define return type.
// - always return 1 byte size for ill-formed UTF-8 runes.
// Lookup returns properties for the first rune in s and the width in bytes of
// its encoding. The size will be 0 if s does not hold enough bytes to complete
// the encoding.
func Lookup(s []byte) (p Properties, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
return Properties{entry: bidiValues[c0]}, 1
case c0 < 0xC2:
return Properties{}, 1
case c0 < 0xE0: // 2-byte UTF-8
if len(s) < 2 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
case c0 < 0xF0: // 3-byte UTF-8
if len(s) < 3 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
case c0 < 0xF8: // 4-byte UTF-8
if len(s) < 4 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
o = uint32(i)<<6 + uint32(c2)
i = bidiIndex[o]
c3 := s[3]
if c3 < 0x80 || 0xC0 <= c3 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
}
// Illegal rune
return Properties{}, 1
}
// LookupString returns properties for the first rune in s and the width in
// bytes of its encoding. The size will be 0 if s does not hold enough bytes to
// complete the encoding.
func LookupString(s string) (p Properties, sz int) {
c0 := s[0]
switch {
case c0 < 0x80: // is ASCII
return Properties{entry: bidiValues[c0]}, 1
case c0 < 0xC2:
return Properties{}, 1
case c0 < 0xE0: // 2-byte UTF-8
if len(s) < 2 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
case c0 < 0xF0: // 3-byte UTF-8
if len(s) < 3 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
case c0 < 0xF8: // 4-byte UTF-8
if len(s) < 4 {
return Properties{}, 0
}
i := bidiIndex[c0]
c1 := s[1]
if c1 < 0x80 || 0xC0 <= c1 {
return Properties{}, 1
}
o := uint32(i)<<6 + uint32(c1)
i = bidiIndex[o]
c2 := s[2]
if c2 < 0x80 || 0xC0 <= c2 {
return Properties{}, 1
}
o = uint32(i)<<6 + uint32(c2)
i = bidiIndex[o]
c3 := s[3]
if c3 < 0x80 || 0xC0 <= c3 {
return Properties{}, 1
}
return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
}
// Illegal rune
return Properties{}, 1
}
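The size conventions described above matter mainly when scanning: callers advance by the returned size, and a size of 0 means the input ended mid-rune. A minimal sketch of iterating over a string with LookupString (class values print here as their numeric constants):

package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	s := "a\u05d0(" // Latin letter, Hebrew letter, opening bracket
	for i := 0; i < len(s); {
		p, sz := bidi.LookupString(s[i:])
		if sz == 0 {
			break // truncated UTF-8 at the end of the input
		}
		fmt.Printf("%q class=%d bracket=%v\n", s[i:i+sz], p.Class(), p.IsBracket())
		i += sz
	}
}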

vendor/golang.org/x/text/unicode/bidi/ranges_test.go generated vendored Normal file

@ -0,0 +1,53 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package bidi
import (
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
// These tables are hand-extracted from:
// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
func visitDefaults(fn func(r rune, c Class)) {
// first write default values for ranges listed above.
visitRunes(fn, AL, []rune{
0x0600, 0x07BF, // Arabic
0x08A0, 0x08FF, // Arabic Extended-A
0xFB50, 0xFDCF, // Arabic Presentation Forms
0xFDF0, 0xFDFF,
0xFE70, 0xFEFF,
0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
})
visitRunes(fn, R, []rune{
0x0590, 0x05FF, // Hebrew
0x07C0, 0x089F, // Nko et al.
0xFB1D, 0xFB4F,
0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
0x0001E800, 0x0001EDFF,
0x0001EF00, 0x0001EFFF,
})
visitRunes(fn, ET, []rune{ // European Terminator
0x20A0, 0x20CF, // Currency symbols
})
rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
fn(r, BN) // Boundary Neutral
})
ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
if p.String(1) == "Default_Ignorable_Code_Point" {
fn(p.Rune(0), BN) // Boundary Neutral
}
})
}
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
for i := 0; i < len(runes); i += 2 {
lo, hi := runes[i], runes[i+1]
for j := lo; j <= hi; j++ {
fn(j, c)
}
}
}

vendor/golang.org/x/text/unicode/bidi/tables.go generated vendored Normal file

File diff suppressed because it is too large

vendor/golang.org/x/text/unicode/bidi/tables_test.go generated vendored Normal file

@ -0,0 +1,82 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bidi
import (
"testing"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/internal/ucd"
)
var labels = []string{
AL: "AL",
AN: "AN",
B: "B",
BN: "BN",
CS: "CS",
EN: "EN",
ES: "ES",
ET: "ET",
L: "L",
NSM: "NSM",
ON: "ON",
R: "R",
S: "S",
WS: "WS",
LRO: "LRO",
RLO: "RLO",
LRE: "LRE",
RLE: "RLE",
PDF: "PDF",
LRI: "LRI",
RLI: "RLI",
FSI: "FSI",
PDI: "PDI",
}
func TestTables(t *testing.T) {
testtext.SkipIfNotLong(t)
ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
r1 := p.Rune(0)
want := p.Rune(1)
e, _ := LookupRune(r1)
if got := e.reverseBracket(r1); got != want {
t.Errorf("Reverse(%U) = %U; want %U", r1, got, want)
}
})
done := map[rune]bool{}
test := func(name string, r rune, want string) {
str := string(r)
e, _ := LookupString(str)
if got := labels[e.Class()]; got != want {
t.Errorf("%s:%U: got %s; want %s", name, r, got, want)
}
if e2, sz := LookupRune(r); e != e2 || sz != len(str) {
t.Errorf("LookupRune(%U) = %v, %d; want %v, %d", r, e2, e, sz, len(str))
}
if e2, sz := Lookup([]byte(str)); e != e2 || sz != len(str) {
t.Errorf("Lookup(%U) = %v, %d; want %v, %d", r, e2, e, sz, len(str))
}
done[r] = true
}
// Insert the derived BiDi properties.
ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
r := p.Rune(0)
test("derived", r, p.String(1))
})
visitDefaults(func(r rune, c Class) {
if !done[r] {
test("default", r, labels[c])
}
})
}

vendor/golang.org/x/text/unicode/bidi/trieval.go generated vendored Normal file

@ -0,0 +1,60 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package bidi
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)

vendor/golang.org/x/text/unicode/cldr/base.go generated vendored Normal file

@ -0,0 +1,100 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"encoding/xml"
"regexp"
"strconv"
)
// Elem is implemented by every XML element.
type Elem interface {
setEnclosing(Elem)
setName(string)
enclosing() Elem
GetCommon() *Common
}
type hidden struct {
CharData string `xml:",chardata"`
Alias *struct {
Common
Source string `xml:"source,attr"`
Path string `xml:"path,attr"`
} `xml:"alias"`
Def *struct {
Common
Choice string `xml:"choice,attr,omitempty"`
Type string `xml:"type,attr,omitempty"`
} `xml:"default"`
}
// Common holds several of the most common attributes and sub elements
// of an XML element.
type Common struct {
XMLName xml.Name
name string
enclElem Elem
Type string `xml:"type,attr,omitempty"`
Reference string `xml:"reference,attr,omitempty"`
Alt string `xml:"alt,attr,omitempty"`
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
Draft string `xml:"draft,attr,omitempty"`
hidden
}
// Default returns the default type to select from the enclosed list
// or "" if no default value is specified.
func (e *Common) Default() string {
if e.Def == nil {
return ""
}
if e.Def.Choice != "" {
return e.Def.Choice
} else if e.Def.Type != "" {
// Type is still used by the default element in collation.
return e.Def.Type
}
return ""
}
// GetCommon returns e. It is provided such that Common implements Elem.
func (e *Common) GetCommon() *Common {
return e
}
// Data returns the character data accumulated for this element.
func (e *Common) Data() string {
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
return e.CharData
}
func (e *Common) setName(s string) {
e.name = s
}
func (e *Common) enclosing() Elem {
return e.enclElem
}
func (e *Common) setEnclosing(en Elem) {
e.enclElem = en
}
// Escape characters that can be escaped without further escaping the string.
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
// It assumes the input string is correctly formatted.
func replaceUnicode(s string) string {
if s[1] == '#' {
r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
return string(rune(r))
}
r, _, _, _ := strconv.UnquoteChar(s, 0)
return string(r)
}
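Data expands both XML-style and backslash escapes via charRe and replaceUnicode. A standalone sketch of the same conversion logic, duplicating the two unexported helpers so the example is self-contained:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)

func replaceUnicode(s string) string {
	if s[1] == '#' {
		r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
		return string(rune(r))
	}
	r, _, _, _ := strconv.UnquoteChar(s, 0)
	return string(r)
}

func main() {
	in := `&#x61;\u00E9\t!`
	fmt.Printf("%q\n", charRe.ReplaceAllStringFunc(in, replaceUnicode)) // "aé\t!"
}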

vendor/golang.org/x/text/unicode/cldr/cldr.go generated vendored Normal file

@ -0,0 +1,130 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run makexml.go -output xml.go
// Package cldr provides a parser for LDML and related XML formats.
// This package is intended to be used by the table generation tools
// for the various internationalization-related packages.
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
// is periodically amended, this package may change considerably over time.
// This mostly means that data may appear and disappear between versions.
// That is, old code should keep compiling for newer versions, but data
// may have moved or changed.
// CLDR version 22 is the first version supported by this package.
// Older versions may not work.
package cldr // import "golang.org/x/text/unicode/cldr"
import (
"fmt"
"sort"
)
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
type CLDR struct {
parent map[string][]string
locale map[string]*LDML
resolved map[string]*LDML
bcp47 *LDMLBCP47
supp *SupplementalData
}
func makeCLDR() *CLDR {
return &CLDR{
parent: make(map[string][]string),
locale: make(map[string]*LDML),
resolved: make(map[string]*LDML),
bcp47: &LDMLBCP47{},
supp: &SupplementalData{},
}
}
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
func (cldr *CLDR) BCP47() *LDMLBCP47 {
return nil
}
// Draft indicates the draft level of an element.
type Draft int
const (
Approved Draft = iota
Contributed
Provisional
Unconfirmed
)
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
if level == "" {
return Approved, nil
}
for i, s := range drafts {
if level == s {
return Unconfirmed - Draft(i), nil
}
}
return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}
func (d Draft) String() string {
return drafts[len(drafts)-1-int(d)]
}
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
// Any draft element for which the draft level is higher than lev will be excluded.
// If multiple draft levels are available for a single element, the one with the
// lowest draft level will be selected, unless preferDraft is true, in which case
// the highest draft will be chosen.
// It is assumed that the underlying LDML is canonicalized.
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
// TODO: implement
cldr.resolved = make(map[string]*LDML)
}
// RawLDML returns the LDML XML for id in unresolved form.
// id must be one of the strings returned by Locales.
func (cldr *CLDR) RawLDML(loc string) *LDML {
return cldr.locale[loc]
}
// LDML returns the fully resolved LDML XML for loc, which must be one of
// the strings returned by Locales.
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
return cldr.resolve(loc)
}
// Supplemental returns the parsed supplemental data. If no such data was parsed,
// nil is returned.
func (cldr *CLDR) Supplemental() *SupplementalData {
return cldr.supp
}
// Locales returns the locales for which there exist files.
// Valid sublocales for which there is no file are not included.
// The root locale is always sorted first.
func (cldr *CLDR) Locales() []string {
loc := []string{"root"}
hasRoot := false
for l := range cldr.locale {
if l == "root" {
hasRoot = true
continue
}
loc = append(loc, l)
}
sort.Strings(loc[1:])
if !hasRoot {
return loc[1:]
}
return loc
}
// Get fills in the fields of x based on the XPath path.
func Get(e Elem, path string) (res Elem, err error) {
return walkXPath(e, path)
}

vendor/golang.org/x/text/unicode/cldr/cldr_test.go generated vendored Normal file

@ -0,0 +1,27 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import "testing"
func TestParseDraft(t *testing.T) {
tests := []struct {
in string
draft Draft
err bool
}{
{"unconfirmed", Unconfirmed, false},
{"provisional", Provisional, false},
{"contributed", Contributed, false},
{"approved", Approved, false},
{"", Approved, false},
{"foo", Approved, true},
}
for _, tt := range tests {
if d, err := ParseDraft(tt.in); d != tt.draft || (err != nil) != tt.err {
t.Errorf("%q: was %v, %v; want %v, %v", tt.in, d, err != nil, tt.draft, tt.err)
}
}
}

vendor/golang.org/x/text/unicode/cldr/collate.go generated vendored Normal file

@ -0,0 +1,359 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"bufio"
"encoding/xml"
"errors"
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
// RuleProcessor can be passed to Collator's Process method, which
// parses the rules and calls the respective method for each rule found.
type RuleProcessor interface {
Reset(anchor string, before int) error
Insert(level int, str, context, extend string) error
Index(id string)
}
const (
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
// of a grouping within an index.
// We ignore any rule that starts with this rune.
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
cldrIndex = "\uFDD0"
// specialAnchor is the format in which to represent logical reset positions,
// such as "first tertiary ignorable".
specialAnchor = "<%s/>"
)
// Process parses the rules for the tailorings of this collation
// and calls the respective methods of p for each rule found.
func (c Collation) Process(p RuleProcessor) (err error) {
if len(c.Cr) > 0 {
if len(c.Cr) > 1 {
return fmt.Errorf("multiple cr elements, want 0 or 1")
}
return processRules(p, c.Cr[0].Data())
}
if c.Rules.Any != nil {
return c.processXML(p)
}
return errors.New("no tailoring data")
}
// processRules parses rules in the Collation Rule Syntax defined in
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
func processRules(p RuleProcessor, s string) (err error) {
chk := func(s string, e error) string {
if err == nil {
err = e
}
return s
}
i := 0 // Save the line number for use after the loop.
scanner := bufio.NewScanner(strings.NewReader(s))
for ; scanner.Scan() && err == nil; i++ {
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
level := 5
var ch byte
switch ch, s = s[0], s[1:]; ch {
case '&': // followed by <anchor> or '[' <key> ']'
if s = skipSpace(s); consume(&s, '[') {
s = chk(parseSpecialAnchor(p, s))
} else {
s = chk(parseAnchor(p, 0, s))
}
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
for level = 1; consume(&s, '<'); level++ {
}
if level > 4 {
err = fmt.Errorf("level %d > 4", level)
}
fallthrough
case '=': // identity relation, optionally followed by *.
if consume(&s, '*') {
s = chk(parseSequence(p, level, s))
} else {
s = chk(parseOrder(p, level, s))
}
default:
chk("", fmt.Errorf("illegal operator %q", ch))
break
}
}
}
if chk("", scanner.Err()); err != nil {
return fmt.Errorf("%d: %v", i, err)
}
return nil
}
// parseSpecialAnchor parses the anchor syntax which is either of the form
// ['before' <level>] <anchor>
// or
// [<label>]
// The starting '[' should already be consumed.
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
i := strings.IndexByte(s, ']')
if i == -1 {
return "", errors.New("unmatched bracket")
}
a := strings.TrimSpace(s[:i])
s = s[i+1:]
if strings.HasPrefix(a, "before ") {
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
if err != nil {
return s, err
}
return parseAnchor(p, int(l), s)
}
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
}
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
anchor, s, err := scanString(s)
if err != nil {
return s, err
}
return s, p.Reset(anchor, level)
}
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
var value, context, extend string
if value, s, err = scanString(s); err != nil {
return s, err
}
if strings.HasPrefix(value, cldrIndex) {
p.Index(value[len(cldrIndex):])
return
}
if consume(&s, '|') {
if context, s, err = scanString(s); err != nil {
return s, errors.New("missing string after context")
}
}
if consume(&s, '/') {
if extend, s, err = scanString(s); err != nil {
return s, errors.New("missing string after extension")
}
}
return s, p.Insert(level, value, context, extend)
}
// scanString scans a single input string.
func scanString(s string) (str, tail string, err error) {
if s = skipSpace(s); s == "" {
return s, s, errors.New("missing string")
}
buf := [16]byte{} // small but enough to hold most cases.
value := buf[:0]
for s != "" {
if consume(&s, '\'') {
i := strings.IndexByte(s, '\'')
if i == -1 {
return "", "", errors.New(`unmatched single quote`)
}
if i == 0 {
value = append(value, '\'')
} else {
value = append(value, s[:i]...)
}
s = s[i+1:]
continue
}
r, sz := utf8.DecodeRuneInString(s)
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
break
}
value = append(value, s[:sz]...)
s = s[sz:]
}
return string(value), skipSpace(s), nil
}
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
if s = skipSpace(s); s == "" {
return s, errors.New("empty sequence")
}
last := rune(0)
for s != "" {
r, sz := utf8.DecodeRuneInString(s)
s = s[sz:]
if r == '-' {
// We have a range. The first element was already written.
if last == 0 {
return s, errors.New("range without starter value")
}
r, sz = utf8.DecodeRuneInString(s)
s = s[sz:]
if r == utf8.RuneError || r < last {
return s, fmt.Errorf("invalid range %q-%q", last, r)
}
for i := last + 1; i <= r; i++ {
if err := p.Insert(level, string(i), "", ""); err != nil {
return s, err
}
}
last = 0
continue
}
if unicode.IsSpace(r) || unicode.IsPunct(r) {
break
}
// normal case
if err := p.Insert(level, string(r), "", ""); err != nil {
return s, err
}
last = r
}
return s, nil
}
func skipSpace(s string) string {
return strings.TrimLeftFunc(s, unicode.IsSpace)
}
// consume returns whether the next byte is ch. If so, it gobbles it by
// updating s.
func consume(s *string, ch byte) (ok bool) {
if *s == "" || (*s)[0] != ch {
return false
}
*s = (*s)[1:]
return true
}
// The following code parses Collation rules of CLDR version 24 and before.
var lmap = map[byte]int{
'p': 1,
's': 2,
't': 3,
'i': 5,
}
type rulesElem struct {
Rules struct {
Common
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
} `xml:"rules"`
}
type rule struct {
Value string `xml:",chardata"`
Before string `xml:"before,attr"`
Any []*struct {
XMLName xml.Name
rule
} `xml:",any"`
}
var emptyValueError = errors.New("cldr: empty rule value")
func (r *rule) value() (string, error) {
// Convert hexadecimal Unicode codepoint notation to a string.
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
r.Value = s
if s == "" {
if len(r.Any) != 1 {
return "", emptyValueError
}
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
r.Any = nil
} else if len(r.Any) != 0 {
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
}
return r.Value, nil
}
func (r rule) process(p RuleProcessor, name, context, extend string) error {
v, err := r.value()
if err != nil {
return err
}
switch name {
case "p", "s", "t", "i":
if strings.HasPrefix(v, cldrIndex) {
p.Index(v[len(cldrIndex):])
return nil
}
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
return err
}
case "pc", "sc", "tc", "ic":
level := lmap[name[0]]
for _, s := range v {
if err := p.Insert(level, string(s), context, extend); err != nil {
return err
}
}
default:
return fmt.Errorf("cldr: unsupported tag: %q", name)
}
return nil
}
// processXML parses the format of CLDR versions 24 and older.
func (c Collation) processXML(p RuleProcessor) (err error) {
// Collation is generated and defined in xml.go.
var v string
for _, r := range c.Rules.Any {
switch r.XMLName.Local {
case "reset":
level := 0
switch r.Before {
case "primary", "1":
level = 1
case "secondary", "2":
level = 2
case "tertiary", "3":
level = 3
case "":
default:
return fmt.Errorf("cldr: unknown level %q", r.Before)
}
v, err = r.value()
if err == nil {
err = p.Reset(v, level)
}
case "x":
var context, extend string
for _, r1 := range r.Any {
v, err = r1.value()
switch r1.XMLName.Local {
case "context":
context = v
case "extend":
extend = v
}
}
for _, r1 := range r.Any {
if t := r1.XMLName.Local; t == "context" || t == "extend" {
continue
}
r1.rule.process(p, r1.XMLName.Local, context, extend)
}
default:
err = r.rule.process(p, r.XMLName.Local, "", "")
}
if err != nil {
return err
}
}
return nil
}

vendor/golang.org/x/text/unicode/cldr/collate_test.go generated vendored Normal file

@ -0,0 +1,275 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"strings"
"testing"
)
// A recorder implements the RuleProcessor interface, whereby its methods
// simply record the invocations.
type recorder struct {
calls []string
}
func (r *recorder) Reset(anchor string, before int) error {
if before > 5 {
return fmt.Errorf("before %d > 5", before)
}
r.calls = append(r.calls, fmt.Sprintf("R:%s-%d", anchor, before))
return nil
}
func (r *recorder) Insert(level int, str, context, extend string) error {
s := fmt.Sprintf("O:%d:%s", level, str)
if context != "" {
s += "|" + context
}
if extend != "" {
s += "/" + extend
}
r.calls = append(r.calls, s)
return nil
}
func (r *recorder) Index(id string) {
r.calls = append(r.calls, fmt.Sprintf("I:%s", id))
}
func (r *recorder) Error(err error) {
r.calls = append(r.calls, fmt.Sprintf("E:%v", err))
}
func TestRuleProcessor(t *testing.T) {
for _, tt := range []struct {
desc string
in string
out string
}{
{desc: "empty"},
{desc: "whitespace and comments only",
in: `
# adsfads
# adfadf
`,
},
{
desc: "reset anchor",
in: `
& a
&b #
& [ before 3 ] c
& [before 4] d & ee
& [first tertiary ignorable]
&'g'
& 'h''h'h'h'
&'\u0069' # LATIN SMALL LETTER I
`,
out: `
R:a-0
R:b-0
R:c-3
R:d-4
R:ee-0
R:<first tertiary ignorable/>-0
R:g-0
R:hhhh-0
R:i-0
`,
},
{
desc: "ordering",
in: `
& 0
< 1 <<''2#
<<< 3'3''33'3#
<<<<4
= 5 << 6 | s
<<<< 7 / z
<< 8'' | s / ch
`,
out: `
R:0-0
O:1:1
O:2:'2
O:3:33333
O:4:4
O:5:5
O:2:6|s
O:4:7/z
O:2:8'|s/ch
`,
},
{
desc: "index",
in: "< '\ufdd0'A",
out: "I:A",
},
{
desc: "sequence",
in: `
& 0
<<* 1234
<* a-cde-f
=* q-q
`,
out: `
R:0-0
O:2:1
O:2:2
O:2:3
O:2:4
O:1:a
O:1:b
O:1:c
O:1:d
O:1:e
O:1:f
O:5:q
`,
},
{
desc: "compact",
in: "&B<t<<<T<s<<<S<e<<<E",
out: `
R:B-0
O:1:t
O:3:T
O:1:s
O:3:S
O:1:e
O:3:E
`,
},
{
desc: "err operator",
in: "a",
out: "E:1: illegal operator 'a'",
},
{
desc: "err line number",
in: `& a
<< b
a`,
out: `
R:a-0
O:2:b
E:3: illegal operator 'a'`,
},
{
desc: "err empty anchor",
in: " & ",
out: "E:1: missing string",
},
{
desc: "err anchor invalid special 1",
in: " & [ foo ",
out: "E:1: unmatched bracket",
},
{
desc: "err anchor invalid special 2",
in: "&[",
out: "E:1: unmatched bracket",
},
{
desc: "err anchor invalid before 1",
in: "&[before a]",
out: `E:1: strconv.ParseUint: parsing "a": invalid syntax`,
},
{
desc: "err anchor invalid before 2",
in: "&[before 12]",
out: `E:1: strconv.ParseUint: parsing "12": value out of range`,
},
{
desc: "err anchor invalid before 3",
in: "&[before 2]",
out: "E:1: missing string",
},
{
desc: "err anchor invalid before 4",
in: "&[before 6] a",
out: "E:1: before 6 > 5",
},
{
desc: "err empty order",
in: " < ",
out: "E:1: missing string",
},
{
desc: "err empty identity",
in: " = ",
out: "E:1: missing string",
},
{
desc: "err empty context",
in: " < a | ",
out: "E:1: missing string after context",
},
{
desc: "err empty extend",
in: " < a / ",
out: "E:1: missing string after extension",
},
{
desc: "err empty sequence",
in: " <* ",
out: "E:1: empty sequence",
},
{
desc: "err sequence 1",
in: " <* -a",
out: "E:1: range without starter value",
},
{
desc: "err sequence 3",
in: " <* a-a-b",
out: `O:1:a
E:1: range without starter value
`,
},
{
desc: "err sequence 3",
in: " <* b-a",
out: `O:1:b
E:1: invalid range 'b'-'a'
`,
},
{
desc: "err unmatched quote",
in: " < 'b",
out: ` E:1: unmatched single quote
`,
},
} {
rec := &recorder{}
err := Collation{
Cr: []*Common{
{hidden: hidden{CharData: tt.in}},
},
}.Process(rec)
if err != nil {
rec.Error(err)
}
got := rec.calls
want := strings.Split(strings.TrimSpace(tt.out), "\n")
if tt.out == "" {
want = nil
}
if len(got) != len(want) {
t.Errorf("%s: nResults: got %d; want %d", tt.desc, len(got), len(want))
continue
}
for i, g := range got {
if want := strings.TrimSpace(want[i]); g != want {
t.Errorf("%s:%d: got %q; want %q", tt.desc, i, g, want)
}
}
}
}

186
vendor/golang.org/x/text/unicode/cldr/data_test.go generated vendored Normal file
View file

@ -0,0 +1,186 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file contains test data.
import (
"io"
"strings"
)
type testLoader struct {
}
func (t testLoader) Len() int {
return len(testFiles)
}
func (t testLoader) Path(i int) string {
return testPaths[i]
}
func (t testLoader) Reader(i int) (io.ReadCloser, error) {
return &reader{*strings.NewReader(testFiles[i])}, nil
}
// reader adds a dummy Close method to strings.Reader so that it
// satisfies the io.ReadCloser interface.
type reader struct {
strings.Reader
}
func (r reader) Close() error {
return nil
}
var (
testFiles = []string{de_xml, gsw_xml, root_xml}
testPaths = []string{
"common/main/de.xml",
"common/main/gsw.xml",
"common/main/root.xml",
}
)
var root_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="root"/>
<generation date="now"/>
</identity>
<characters>
<exemplarCharacters>[]</exemplarCharacters>
<exemplarCharacters type="auxiliary">[]</exemplarCharacters>
<exemplarCharacters type="punctuation">[\- ' &quot; \&amp; #]</exemplarCharacters>
<ellipsis type="final">{0}</ellipsis>
<ellipsis type="initial">{0}</ellipsis>
<moreInformation>?</moreInformation>
</characters>
<dates>
<calendars>
<default choice="gregorian"/>
<calendar type="buddhist">
<months>
<alias source="locale" path="../../calendar[@type='gregorian']/months"/>
</months>
</calendar>
<calendar type="chinese">
<months>
<alias source="locale" path="../../calendar[@type='gregorian']/months"/>
</months>
</calendar>
<calendar type="gregorian">
<months>
<default choice="format"/>
<monthContext type="format">
<default choice="wide"/>
<monthWidth type="narrow">
<alias source="locale" path="../../monthContext[@type='stand-alone']/monthWidth[@type='narrow']"/>
</monthWidth>
<monthWidth type="wide">
<month type="1">11</month>
<month type="2">22</month>
<month type="3">33</month>
<month type="4">44</month>
</monthWidth>
</monthContext>
<monthContext type="stand-alone">
<monthWidth type="narrow">
<month type="1">1</month>
<month type="2">2</month>
<month type="3">3</month>
<month type="4">4</month>
</monthWidth>
<monthWidth type="wide">
<alias source="locale" path="../../monthContext[@type='format']/monthWidth[@type='wide']"/>
</monthWidth>
</monthContext>
</months>
</calendar>
</calendars>
</dates>
</ldml>
`
var de_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="de"/>
</identity>
<characters>
<exemplarCharacters>[a ä b c d e ö p q r s ß t u ü v w x y z]</exemplarCharacters>
<exemplarCharacters type="auxiliary">[á à ă]</exemplarCharacters>
<exemplarCharacters type="index">[A B C D E F G H Z]</exemplarCharacters>
<ellipsis type="final">{0} </ellipsis>
<ellipsis type="initial"> {0}</ellipsis>
<moreInformation>?</moreInformation>
<stopwords>
<stopwordList type="collation" draft="provisional">der die das</stopwordList>
</stopwords>
</characters>
<dates>
<calendars>
<calendar type="buddhist">
<months>
<monthContext type="format">
<monthWidth type="narrow">
<month type="3">BBB</month>
</monthWidth>
<monthWidth type="wide">
<month type="3">bbb</month>
</monthWidth>
</monthContext>
</months>
</calendar>
<calendar type="gregorian">
<months>
<monthContext type="format">
<monthWidth type="narrow">
<month type="3">M</month>
<month type="4">A</month>
</monthWidth>
<monthWidth type="wide">
<month type="3">Maerz</month>
<month type="4">April</month>
<month type="5">Mai</month>
</monthWidth>
</monthContext>
<monthContext type="stand-alone">
<monthWidth type="narrow">
<month type="3">m</month>
<month type="5">m</month>
</monthWidth>
<monthWidth type="wide">
<month type="4">april</month>
<month type="5">mai</month>
</monthWidth>
</monthContext>
</months>
</calendar>
</calendars>
</dates>
<posix>
<messages>
<yesstr>yes:y</yesstr>
<nostr>no:n</nostr>
</messages>
</posix>
</ldml>
`
var gsw_xml = `<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE ldml SYSTEM "../../common/dtd/ldml.dtd">
<ldml>
<identity>
<language type="gsw"/>
</identity>
<posix>
<alias source="de" path="//ldml/posix"/>
</posix>
</ldml>
`

171
vendor/golang.org/x/text/unicode/cldr/decode.go generated vendored Normal file
View file

@ -0,0 +1,171 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"archive/zip"
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
)
// A Decoder loads an archive of CLDR data.
type Decoder struct {
dirFilter []string
sectionFilter []string
loader Loader
cldr *CLDR
curLocale string
}
// SetSectionFilter takes a list of top-level LDML element names to which
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
func (d *Decoder) SetSectionFilter(filter ...string) {
d.sectionFilter = filter
// TODO: automatically set dir filter
}
// SetDirFilter limits the loading of LDML XML files to the specified directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
d.dirFilter = dir
}
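// Added usage sketch (directory and section names are taken from this
// package's test data; other CLDR layouts may differ):
//
//	var d Decoder
//	d.SetDirFilter("main")           // only files under a "main" directory
//	d.SetSectionFilter("characters") // only the <characters> section of LDML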
// A Loader provides access to the files of a CLDR archive.
type Loader interface {
Len() int
Path(i int) string
Reader(i int) (io.ReadCloser, error)
}
var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml")
// Decode loads and decodes the files represented by l.
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
d.cldr = makeCLDR()
for i := 0; i < l.Len(); i++ {
fname := l.Path(i)
if m := fileRe.FindStringSubmatch(fname); m != nil {
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
continue
}
var r io.Reader
if r, err = l.Reader(i); err == nil {
err = d.decode(m[1], m[2], r)
}
if err != nil {
return nil, err
}
}
}
d.cldr.finalize(d.sectionFilter)
return d.cldr, nil
}
func (d *Decoder) decode(dir, id string, r io.Reader) error {
var v interface{}
var l *LDML
cldr := d.cldr
switch {
case dir == "supplemental":
v = cldr.supp
case dir == "transforms":
return nil
case dir == "bcp47":
v = cldr.bcp47
case dir == "validity":
return nil
default:
ok := false
if v, ok = cldr.locale[id]; !ok {
l = &LDML{}
v, cldr.locale[id] = l, l
}
}
x := xml.NewDecoder(r)
if err := x.Decode(v); err != nil {
log.Printf("%s/%s: %v", dir, id, err)
return err
}
if l != nil {
if l.Identity == nil {
return fmt.Errorf("%s/%s: missing identity element", dir, id)
}
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
// is resolved.
// path := strings.Split(id, "_")
// if lang := l.Identity.Language.Type; lang != path[0] {
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
// }
}
return nil
}
type pathLoader []string
func makePathLoader(path string) (pl pathLoader, err error) {
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
pl = append(pl, path)
return err
})
return pl, err
}
func (pl pathLoader) Len() int {
return len(pl)
}
func (pl pathLoader) Path(i int) string {
return pl[i]
}
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
return os.Open(pl[i])
}
// DecodePath loads CLDR data from the given path.
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
loader, err := makePathLoader(path)
if err != nil {
return nil, err
}
return d.Decode(loader)
}
type zipLoader struct {
r *zip.Reader
}
func (zl zipLoader) Len() int {
return len(zl.r.File)
}
func (zl zipLoader) Path(i int) string {
return zl.r.File[i].Name
}
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
return zl.r.File[i].Open()
}
// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
buffer, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
return nil, err
}
return d.Decode(zipLoader{archive})
}
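// Added usage sketch (the archive path is an assumption; any CLDR core archive
// or unpacked tree should work):
//
//	var d Decoder
//	d.SetSectionFilter("collations")
//	data, err := d.DecodePath("testdata/core") // or d.DecodeZip(r) for zip input
//	if err != nil {
//		// handle error
//	}
//	root := data.RawLDML("root")
//	_ = root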

21
vendor/golang.org/x/text/unicode/cldr/examples_test.go generated vendored Normal file
View file

@ -0,0 +1,21 @@
package cldr_test
import (
"fmt"
"golang.org/x/text/unicode/cldr"
)
func ExampleSlice() {
var dr *cldr.CLDR // assume this is initialized
x, _ := dr.LDML("en")
cs := x.Collations.Collation
// remove all but the default
cldr.MakeSlice(&cs).Filter(func(e cldr.Elem) bool {
return e.GetCommon().Type != x.Collations.Default()
})
for i, c := range cs {
fmt.Println(i, c.Type)
}
}

400
vendor/golang.org/x/text/unicode/cldr/makexml.go generated vendored Normal file
View file

@ -0,0 +1,400 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This tool generates types for the various XML formats of CLDR.
package main
import (
"archive/zip"
"bytes"
"encoding/xml"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"golang.org/x/text/internal/gen"
)
var outputFile = flag.String("output", "xml.go", "output file name")
func main() {
flag.Parse()
r := gen.OpenCLDRCoreZip()
buffer, err := ioutil.ReadAll(r)
if err != nil {
log.Fatal("Could not read zip file")
}
r.Close()
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
if err != nil {
log.Fatalf("Could not read zip archive: %v", err)
}
var buf bytes.Buffer
version := gen.CLDRVersion()
for _, dtd := range files {
for _, f := range z.File {
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
r, err := f.Open()
failOnError(err)
b := makeBuilder(&buf, dtd)
b.parseDTD(r)
b.resolve(b.index[dtd.top[0]])
b.write()
if b.version != "" && version != b.version {
println(f.Name)
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
}
break
}
}
}
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
fmt.Fprintf(&buf, "const Version = %q\n", version)
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
}
func failOnError(err error) {
if err != nil {
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
os.Exit(1)
}
}
// configuration data per DTD type
type dtd struct {
file string // base file name
root string // Go name of the root XML element
top []string // create a different type for this section
skipElem []string // hard-coded or deprecated elements
skipAttr []string // attributes to exclude
predefined []string // elements for which a hard-coded type of the form <name>Elem exists
forceRepeat []string // elements to make slices despite DTD
}
var files = []dtd{
{
file: "ldmlBCP47",
root: "LDMLBCP47",
top: []string{"ldmlBCP47"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
},
{
file: "ldmlSupplemental",
root: "SupplementalData",
top: []string{"supplementalData"},
skipElem: []string{
"cldrVersion", // deprecated, not used
},
forceRepeat: []string{
"plurals", // data defined in plurals.xml and ordinals.xml
},
},
{
file: "ldml",
root: "LDML",
top: []string{
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
},
skipElem: []string{
"cp", // not used anywhere
"special", // not used anywhere
"fallback", // deprecated, not used
"alias", // in Common
"default", // in Common
},
skipAttr: []string{
"hiraganaQuarternary", // typo in DTD, correct version included as well
},
predefined: []string{"rules"},
},
}
var comments = map[string]string{
"ldmlBCP47": `
// LDMLBCP47 holds information on allowable values for various variables in LDML.
`,
"supplementalData": `
// SupplementalData holds information relevant for internationalization
// and proper use of CLDR, but that is not contained in the locale hierarchy.
`,
"ldml": `
// LDML is the top-level type for locale-specific data.
`,
"collation": `
// Collation contains rules that specify a certain sort-order,
// as a tailoring of the root order.
// The parsed rules are obtained by passing a RuleProcessor to Collation's
// Process method.
`,
"calendar": `
// Calendar specifies the fields used for formatting and parsing dates and times.
// The month and quarter names are identified numerically, starting at 1.
// The day (of the week) names are identified with short strings, since there is
// no universally-accepted numeric designation.
`,
"dates": `
// Dates contains information regarding the format and parsing of dates and times.
`,
"localeDisplayNames": `
// LocaleDisplayNames specifies localized display names for scripts, languages,
// countries, currencies, and variants.
`,
"numbers": `
// Numbers supplies information for formatting and parsing numbers and currencies.
`,
}
type element struct {
name string // XML element name
category string // elements contained by this element
signature string // category + attrKey*
attr []*attribute // attributes supported by this element.
sub []struct { // parsed and evaluated sub elements of this element.
e *element
repeat bool // true if the element needs to be a slice
}
resolved bool // prevent multiple resolutions of this element.
}
type attribute struct {
name string
key string
list []string
tag string // Go tag
}
var (
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
reToken = regexp.MustCompile(`\w\-`)
)
// builder is used to read in the DTD files from CLDR and generate Go code
// to be used with the encoding/xml package.
type builder struct {
w io.Writer
index map[string]*element
elem []*element
info dtd
version string
}
func makeBuilder(w io.Writer, d dtd) builder {
return builder{
w: w,
index: make(map[string]*element),
elem: []*element{},
info: d,
}
}
// parseDTD parses a DTD file.
func (b *builder) parseDTD(r io.Reader) {
for d := xml.NewDecoder(r); ; {
t, err := d.Token()
if t == nil {
break
}
failOnError(err)
dir, ok := t.(xml.Directive)
if !ok {
continue
}
m := reHead.FindSubmatch(dir)
dir = dir[len(m[0]):]
ename := string(m[2])
el, elementFound := b.index[ename]
switch string(m[1]) {
case "ELEMENT":
if elementFound {
log.Fatal("parseDTD: duplicate entry for element %q", ename)
}
m := reElem.FindSubmatch(dir)
if m == nil {
log.Fatalf("parseDTD: invalid element %q", string(dir))
}
if len(m[0]) != len(dir) {
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
}
s := string(m[1])
el = &element{
name: ename,
category: s,
}
b.index[ename] = el
case "ATTLIST":
if !elementFound {
log.Fatalf("parseDTD: unknown element %q", ename)
}
s := string(dir)
m := reAttr.FindStringSubmatch(s)
if m == nil {
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
}
if m[4] == "FIXED" {
b.version = m[5]
} else {
switch m[1] {
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
case "type", "choice":
default:
el.attr = append(el.attr, &attribute{
name: m[1],
key: s,
list: reToken.FindAllString(m[3], -1),
})
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
}
}
}
}
}
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)
// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
if e.resolved {
return
}
b.elem = append(b.elem, e)
e.resolved = true
s := e.category
found := make(map[string]bool)
sequenceStart := []int{}
for len(s) > 0 {
m := reCat.FindStringSubmatch(s)
if m == nil {
log.Fatalf("%s: invalid category string %q", e.name, s)
}
repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
switch m[1] {
case "":
case "(":
sequenceStart = append(sequenceStart, len(e.sub))
case ")":
if len(sequenceStart) == 0 {
log.Fatalf("%s: unmatched closing parenthesis", e.name)
}
for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
e.sub[i].repeat = e.sub[i].repeat || repeat
}
sequenceStart = sequenceStart[:len(sequenceStart)-1]
default:
if in(b.info.skipElem, m[1]) {
} else if sub, ok := b.index[m[1]]; ok {
if !found[sub.name] {
e.sub = append(e.sub, struct {
e *element
repeat bool
}{sub, repeat})
found[sub.name] = true
b.resolve(sub)
}
} else if m[1] == "#PCDATA" || m[1] == "ANY" {
} else if m[1] != "EMPTY" {
log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
}
}
s = s[len(m[0]):]
}
}
// in returns true if s is contained in set.
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
var repl = strings.NewReplacer("-", " ", "_", " ")
// title puts the first character or each character following '_' in title case and
// removes all occurrences of '_'.
func title(s string) string {
return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1)
}
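// Added example: title("time-zone_names") yields "TimeZoneNames": '-' and '_'
// are first replaced by spaces, each word is title-cased, and the spaces are
// then removed.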
// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
p := func(f string, x ...interface{}) {
f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
fmt.Fprintf(b.w, f, x...)
}
if len(e.sub) == 0 && len(e.attr) == 0 {
p("Common")
return
}
p("struct {")
tab++
p("\nCommon")
for _, attr := range e.attr {
if !in(b.info.skipAttr, attr.name) {
p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
}
}
for _, sub := range e.sub {
if in(b.info.predefined, sub.e.name) {
p("\n%sElem", sub.e.name)
continue
}
if in(b.info.skipElem, sub.e.name) {
continue
}
p("\n%s ", title(sub.e.name))
if sub.repeat {
p("[]")
}
p("*")
if in(b.info.top, sub.e.name) {
p(title(sub.e.name))
} else {
b.writeElem(tab, sub.e)
}
p(" `xml:\"%s\"`", sub.e.name)
}
tab--
p("\n}")
}
// write generates the Go XML code.
func (b *builder) write() {
for i, name := range b.info.top {
e := b.index[name]
if e != nil {
fmt.Fprintf(b.w, comments[name])
name := title(e.name)
if i == 0 {
name = b.info.root
}
fmt.Fprintf(b.w, "type %s ", name)
b.writeElem(0, e)
fmt.Fprint(b.w, "\n")
}
}
}

602
vendor/golang.org/x/text/unicode/cldr/resolve.go generated vendored Normal file
View file

@ -0,0 +1,602 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
// This file implements the various inheritance constructs defined by LDML.
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
// for more details.
import (
"fmt"
"log"
"reflect"
"regexp"
"sort"
"strings"
)
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
v reflect.Value
index, n []int
}
func iter(v reflect.Value) fieldIter {
if v.Kind() != reflect.Struct {
log.Panicf("value %v must be a struct", v)
}
i := fieldIter{
v: v,
index: []int{0},
n: []int{v.NumField()},
}
i.descent()
return i
}
func (i *fieldIter) descent() {
for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
i.index = append(i.index, 0)
i.n = append(i.n, f.Type.NumField())
}
}
func (i *fieldIter) done() bool {
return len(i.index) == 1 && i.index[0] >= i.n[0]
}
func skip(f reflect.StructField) bool {
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
}
func (i *fieldIter) next() {
for {
k := len(i.index) - 1
i.index[k]++
if i.index[k] < i.n[k] {
if !skip(i.field()) {
break
}
} else {
if k == 0 {
return
}
i.index = i.index[:k]
i.n = i.n[:k]
}
}
i.descent()
}
func (i *fieldIter) value() reflect.Value {
return i.v.FieldByIndex(i.index)
}
func (i *fieldIter) field() reflect.StructField {
return i.v.Type().FieldByIndex(i.index)
}
type visitor func(v reflect.Value) error
var stopDescent = fmt.Errorf("do not recurse")
func (f visitor) visit(x interface{}) error {
return f.visitRec(reflect.ValueOf(x))
}
// visit recursively calls f on all nodes in v.
func (f visitor) visitRec(v reflect.Value) error {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil
}
return f.visitRec(v.Elem())
}
if err := f(v); err != nil {
if err == stopDescent {
return nil
}
return err
}
switch v.Kind() {
case reflect.Struct:
for i := iter(v); !i.done(); i.next() {
if err := f.visitRec(i.value()); err != nil {
return err
}
}
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := f.visitRec(v.Index(i)); err != nil {
return err
}
}
}
return nil
}
// getPath is used for error reporting purposes only.
func getPath(e Elem) string {
if e == nil {
return "<nil>"
}
if e.enclosing() == nil {
return e.GetCommon().name
}
if e.GetCommon().Type == "" {
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
}
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
}
// xmlName returns the xml name of the element or attribute
func xmlName(f reflect.StructField) (name string, attr bool) {
tags := strings.Split(f.Tag.Get("xml"), ",")
for _, s := range tags {
attr = attr || s == "attr"
}
return tags[0], attr
}
func findField(v reflect.Value, key string) (reflect.Value, error) {
v = reflect.Indirect(v)
for i := iter(v); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == key {
return i.value(), nil
}
}
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
}
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
func walkXPath(e Elem, path string) (res Elem, err error) {
for _, c := range strings.Split(path, "/") {
if c == ".." {
if e = e.enclosing(); e == nil {
panic("path ..")
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
}
continue
} else if c == "" {
continue
}
m := xpathPart.FindStringSubmatch(c)
if len(m) == 0 || len(m[0]) != len(c) {
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
}
v, err := findField(reflect.ValueOf(e), m[1])
if err != nil {
return nil, err
}
switch v.Kind() {
case reflect.Slice:
i := 0
if m[2] != "" || v.Len() > 1 {
if m[2] == "" {
m[2] = "type"
if m[3] = e.GetCommon().Default(); m[3] == "" {
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
}
}
for ; i < v.Len(); i++ {
vi := v.Index(i)
key, err := findField(vi.Elem(), m[2])
if err != nil {
return nil, err
}
key = reflect.Indirect(key)
if key.Kind() == reflect.String && key.String() == m[3] {
break
}
}
}
if i == v.Len() || v.Index(i).IsNil() {
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
}
e = v.Index(i).Interface().(Elem)
case reflect.Ptr:
if v.IsNil() {
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
}
var ok bool
if e, ok = v.Interface().(Elem); !ok {
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
} else if m[2] != "" || m[3] != "" {
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
}
default:
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
}
}
return e, nil
}
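// Added note on the path syntax accepted above: components are separated by
// '/', ".." moves to the enclosing element, and a component may carry a
// selector such as
//
//	dates/calendars/calendar[@type='gregorian']/months
//
// which picks the slice entry whose selecting attribute (by default "type")
// equals the given value; without a selector, the element's Default() value is
// used when more than one entry is present.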
const absPrefix = "//ldml/"
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
if src != "locale" {
if !strings.HasPrefix(path, absPrefix) {
return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
}
path = path[len(absPrefix):]
if e, err = cldr.resolve(src); err != nil {
return nil, err
}
}
return walkXPath(e, path)
}
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
alias := e.GetCommon().Alias
if alias == nil {
return nil
}
a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
if err != nil {
return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
}
// Ensure alias node was already evaluated. TODO: avoid double evaluation.
err = cldr.resolveAndMergeAlias(a)
v := reflect.ValueOf(e).Elem()
for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
if _, attr := xmlName(i.field()); !attr {
v.FieldByIndex(i.index).Set(vv)
}
}
}
return err
}
func (cldr *CLDR) aliasResolver() visitor {
return func(v reflect.Value) (err error) {
if e, ok := v.Addr().Interface().(Elem); ok {
err = cldr.resolveAndMergeAlias(e)
if err == nil && blocking[e.GetCommon().name] {
return stopDescent
}
}
return err
}
}
// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
"identity": true,
"supplementalData": true,
"cldrTest": true,
"collation": true,
"transform": true,
}
// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
var distinguishing = map[string][]string{
"key": nil,
"request_id": nil,
"id": nil,
"registry": nil,
"alt": nil,
"iso4217": nil,
"iso3166": nil,
"mzone": nil,
"from": nil,
"to": nil,
"type": []string{
"abbreviationFallback",
"default",
"mapping",
"measurementSystem",
"preferenceOrdering",
},
"numberSystem": nil,
}
func in(set []string, s string) bool {
for _, v := range set {
if v == s {
return true
}
}
return false
}
// attrKey computes a key based on the distinguishing attributes of
// an element and its values.
func attrKey(v reflect.Value, exclude ...string) string {
parts := []string{}
ename := v.Interface().(Elem).GetCommon().name
v = v.Elem()
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); attr {
if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
v := i.value()
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if v.IsValid() {
parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
}
}
}
}
sort.Strings(parts)
return strings.Join(parts, ";")
}
// Key returns a key for e derived from all distinguishing attributes
// except those specified by exclude.
func Key(e Elem, exclude ...string) string {
return attrKey(reflect.ValueOf(e), exclude...)
}
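// Added example (values as in this package's tests): an element with
// distinguishing attributes alt="foo", key="k" and to="nyc" yields the key
// "alt=foo;key=k;to=nyc", while Key(e, "alt") yields "key=k;to=nyc".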
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
child.setEnclosing(parent)
v := reflect.ValueOf(child).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
linkEnclosing(child, vf.Index(j).Interface().(Elem))
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
linkEnclosing(child, vf.Interface().(Elem))
}
}
}
func setNames(e Elem, name string) {
e.setName(name)
v := reflect.ValueOf(e).Elem()
for i := iter(v); !i.done(); i.next() {
vf := i.value()
name, _ = xmlName(i.field())
if vf.Kind() == reflect.Slice {
for j := 0; j < vf.Len(); j++ {
setNames(vf.Index(j).Interface().(Elem), name)
}
} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
setNames(vf.Interface().(Elem), name)
}
}
}
// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr:
if v.IsNil() || v.Elem().Kind() != reflect.Struct {
return v
}
nv := reflect.New(v.Elem().Type())
nv.Elem().Set(v.Elem())
deepCopyRec(nv.Elem(), v.Elem())
return nv
case reflect.Slice:
nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
for i := 0; i < v.Len(); i++ {
deepCopyRec(nv.Index(i), v.Index(i))
}
return nv
}
panic("deepCopy: must be called with pointer or slice")
}
// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
if v.Kind() == reflect.Struct {
t := v.Type()
for i := 0; i < v.NumField(); i++ {
if name, attr := xmlName(t.Field(i)); name != "" && !attr {
deepCopyRec(nv.Field(i), v.Field(i))
}
}
} else {
nv.Set(deepCopy(v))
}
}
// newNode is used to insert a missing node during inheritance.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
n := reflect.New(v.Type())
for i := iter(v); !i.done(); i.next() {
if name, attr := xmlName(i.field()); name == "" || attr {
n.Elem().FieldByIndex(i.index).Set(i.value())
}
}
n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
return n
}
// v, parent must be pointers to struct
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
nv := reflect.New(t)
nv.Elem().Set(v)
for i := iter(v); !i.done(); i.next() {
vf := i.value()
f := i.field()
name, attr := xmlName(f)
if name == "" || attr {
continue
}
pf := parent.FieldByIndex(i.index)
if blocking[name] {
if vf.IsNil() {
vf = pf
}
nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
continue
}
switch f.Type.Kind() {
case reflect.Ptr:
if f.Type.Elem().Kind() == reflect.Struct {
if !vf.IsNil() {
if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
} else if !pf.IsNil() {
n := cldr.newNode(pf.Elem(), v)
if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
return reflect.Value{}, err
}
vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
case reflect.Slice:
vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
if err != nil {
return reflect.Zero(t), err
}
nv.Elem().FieldByIndex(i.index).Set(vf)
}
}
return nv, nil
}
func root(e Elem) *LDML {
for ; e.enclosing() != nil; e = e.enclosing() {
}
return e.(*LDML)
}
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
if !v.IsNil() {
e := v.Interface().(Elem).GetCommon()
alias := e.Alias
if alias == nil && !parent.IsNil() {
alias = parent.Interface().(Elem).GetCommon().Alias
}
if alias != nil {
a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
if a != nil {
if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
return reflect.Value{}, err
}
}
}
if !parent.IsNil() {
return cldr.inheritFields(v.Elem(), parent.Elem())
}
} else if parent.IsNil() {
panic("should not reach here")
}
return v, nil
}
// Must be slice of struct pointers.
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
t := v.Type()
index := make(map[string]reflect.Value)
if !v.IsNil() {
for i := 0; i < v.Len(); i++ {
vi := v.Index(i)
key := attrKey(vi)
index[key] = vi
}
}
if !parent.IsNil() {
for i := 0; i < parent.Len(); i++ {
vi := parent.Index(i)
key := attrKey(vi)
if w, ok := index[key]; ok {
index[key], err = cldr.inheritStructPtr(w, vi)
} else {
n := cldr.newNode(vi.Elem(), enc)
index[key], err = cldr.inheritStructPtr(n, vi)
}
index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
if err != nil {
return v, err
}
}
}
keys := make([]string, 0, len(index))
for k := range index {
keys = append(keys, k)
}
sort.Strings(keys)
sl := reflect.MakeSlice(t, len(index), len(index))
for i, k := range keys {
sl.Index(i).Set(index[k])
}
return sl, nil
}
func parentLocale(loc string) string {
parts := strings.Split(loc, "_")
if len(parts) == 1 {
return "root"
}
parts = parts[:len(parts)-1]
key := strings.Join(parts, "_")
return key
}
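// Added example: parentLocale("de_CH") == "de" and parentLocale("de") == "root".
// resolve below walks this chain until it reaches a locale that is actually
// present, so a hypothetical "de_CH_variant" falls back through "de_CH" and
// "de" before ending at "root".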
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
if r := cldr.resolved[loc]; r != nil {
return r, nil
}
x := cldr.RawLDML(loc)
if x == nil {
return nil, fmt.Errorf("cldr: unknown locale %q", loc)
}
var v reflect.Value
if loc == "root" {
x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
linkEnclosing(nil, x)
err = cldr.aliasResolver().visit(x)
} else {
key := parentLocale(loc)
var parent *LDML
for ; cldr.locale[key] == nil; key = parentLocale(key) {
}
if parent, err = cldr.resolve(key); err != nil {
return nil, err
}
v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
x = v.Interface().(*LDML)
linkEnclosing(nil, x)
}
if err != nil {
return nil, err
}
cldr.resolved[loc] = x
return x, err
}
// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
for _, x := range cldr.locale {
if filter != nil {
v := reflect.ValueOf(x).Elem()
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
name, _ := xmlName(f)
if name != "" && name != "identity" && !in(filter, name) {
v.Field(i).Set(reflect.Zero(f.Type))
}
}
}
linkEnclosing(nil, x) // for resolving aliases and paths
setNames(x, "ldml")
}
}

368
vendor/golang.org/x/text/unicode/cldr/resolve_test.go generated vendored Normal file
View file

@ -0,0 +1,368 @@
package cldr
import (
"fmt"
"log"
"reflect"
"testing"
)
func failOnError(err error) {
if err != nil {
log.Panic(err)
}
}
func data() *CLDR {
d := Decoder{}
data, err := d.Decode(testLoader{})
failOnError(err)
return data
}
type h struct {
A string `xml:"ha,attr"`
E string `xml:"he"`
D string `xml:",chardata"`
X string
}
type fieldTest struct {
Common
To string `xml:"to,attr"`
Key string `xml:"key,attr"`
E string `xml:"e"`
D string `xml:",chardata"`
X string
h
}
var testStruct = fieldTest{
Common: Common{
name: "mapping", // exclude "type" as distinguising attribute
Type: "foo",
Alt: "foo",
},
To: "nyc",
Key: "k",
E: "E",
D: "D",
h: h{
A: "A",
E: "E",
D: "D",
},
}
func TestIter(t *testing.T) {
tests := map[string]string{
"Type": "foo",
"Alt": "foo",
"To": "nyc",
"A": "A",
"Alias": "<nil>",
}
k := 0
for i := iter(reflect.ValueOf(testStruct)); !i.done(); i.next() {
v := i.value()
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.String {
v = v.Elem()
}
name := i.field().Name
if w, ok := tests[name]; ok {
s := fmt.Sprint(v.Interface())
if w != s {
t.Errorf("value: found %q; want %q", w, s)
}
delete(tests, name)
}
k++
}
if len(tests) != 0 {
t.Errorf("missing fields: %v", tests)
}
}
func TestFindField(t *testing.T) {
tests := []struct {
name, val string
exist bool
}{
{"type", "foo", true},
{"alt", "foo", true},
{"to", "nyc", true},
{"he", "E", true},
{"q", "", false},
}
vf := reflect.ValueOf(testStruct)
for i, tt := range tests {
v, err := findField(vf, tt.name)
if (err == nil) != tt.exist {
t.Errorf("%d: field %q present is %v; want %v", i, tt.name, err == nil, tt.exist)
} else if tt.exist {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
continue
}
v = v.Elem()
}
if v.String() != tt.val {
t.Errorf("%d: found value %q; want %q", i, v.String(), tt.val)
}
}
}
}
var keyTests = []struct {
exclude []string
key string
}{
{[]string{}, "alt=foo;key=k;to=nyc"},
{[]string{"type"}, "alt=foo;key=k;to=nyc"},
{[]string{"choice"}, "alt=foo;key=k;to=nyc"},
{[]string{"alt"}, "key=k;to=nyc"},
{[]string{"a"}, "alt=foo;key=k;to=nyc"},
{[]string{"to"}, "alt=foo;key=k"},
{[]string{"alt", "to"}, "key=k"},
{[]string{"alt", "to", "key"}, ""},
}
func TestAttrKey(t *testing.T) {
v := reflect.ValueOf(&testStruct)
for i, tt := range keyTests {
key := attrKey(v, tt.exclude...)
if key != tt.key {
t.Errorf("%d: found %q, want %q", i, key, tt.key)
}
}
}
func TestKey(t *testing.T) {
for i, tt := range keyTests {
key := Key(&testStruct, tt.exclude...)
if key != tt.key {
t.Errorf("%d: found %q, want %q", i, key, tt.key)
}
}
}
func testEnclosing(t *testing.T, x *LDML, name string) {
eq := func(a, b Elem, i int) {
for ; i > 0; i-- {
b = b.enclosing()
}
if a != b {
t.Errorf("%s: found path %q, want %q", name, getPath(a), getPath(b))
}
}
eq(x, x, 0)
eq(x, x.Identity, 1)
eq(x, x.Dates.Calendars, 2)
eq(x, x.Dates.Calendars.Calendar[0], 3)
eq(x, x.Dates.Calendars.Calendar[1], 3)
//eq(x, x.Dates.Calendars.Calendar[0].Months, 4)
eq(x, x.Dates.Calendars.Calendar[1].Months, 4)
}
func TestEnclosing(t *testing.T) {
testEnclosing(t, data().RawLDML("de"), "enclosing-raw")
de, _ := data().LDML("de")
testEnclosing(t, de, "enclosing")
}
func TestDeepCopy(t *testing.T) {
eq := func(have, want string) {
if have != want {
t.Errorf("found %q; want %q", have, want)
}
}
x, _ := data().LDML("de")
vc := deepCopy(reflect.ValueOf(x))
c := vc.Interface().(*LDML)
linkEnclosing(nil, c)
if x == c {
t.Errorf("did not copy")
}
eq(c.name, "ldml")
eq(c.Dates.name, "dates")
testEnclosing(t, c, "deepCopy")
}
type getTest struct {
loc string
path string
field string // used in combination with length
data string
altData string // used for buddhist calendar if value != ""
typ string
length int
missing bool
}
const (
budMon = "dates/calendars/calendar[@type='buddhist']/months/"
chnMon = "dates/calendars/calendar[@type='chinese']/months/"
greMon = "dates/calendars/calendar[@type='gregorian']/months/"
)
func monthVal(path, context, width string, month int) string {
const format = "%s/monthContext[@type='%s']/monthWidth[@type='%s']/month[@type='%d']"
return fmt.Sprintf(format, path, context, width, month)
}
var rootGetTests = []getTest{
{loc: "root", path: "identity/language", typ: "root"},
{loc: "root", path: "characters/moreInformation", data: "?"},
{loc: "root", path: "characters", field: "exemplarCharacters", length: 3},
{loc: "root", path: greMon, field: "monthContext", length: 2},
{loc: "root", path: greMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4},
{loc: "root", path: greMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4},
// unescaping character data
{loc: "root", path: "characters/exemplarCharacters[@type='punctuation']", data: `[\- — … ' " “ „ \& #]`},
// default resolution
{loc: "root", path: "dates/calendars/calendar", typ: "gregorian"},
// alias resolution
{loc: "root", path: budMon, field: "monthContext", length: 2},
// crossing but non-circular alias resolution
{loc: "root", path: budMon + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 4},
{loc: "root", path: budMon + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 4},
{loc: "root", path: monthVal(greMon, "format", "wide", 1), data: "11"},
{loc: "root", path: monthVal(greMon, "format", "narrow", 2), data: "2"},
{loc: "root", path: monthVal(greMon, "stand-alone", "wide", 3), data: "33"},
{loc: "root", path: monthVal(greMon, "stand-alone", "narrow", 4), data: "4"},
{loc: "root", path: monthVal(budMon, "format", "wide", 1), data: "11"},
{loc: "root", path: monthVal(budMon, "format", "narrow", 2), data: "2"},
{loc: "root", path: monthVal(budMon, "stand-alone", "wide", 3), data: "33"},
{loc: "root", path: monthVal(budMon, "stand-alone", "narrow", 4), data: "4"},
}
// 19
var deGetTests = []getTest{
{loc: "de", path: "identity/language", typ: "de"},
{loc: "de", path: "posix", length: 2},
{loc: "de", path: "characters", field: "exemplarCharacters", length: 4},
{loc: "de", path: "characters/exemplarCharacters[@type='auxiliary']", data: `[á à ă]`},
// identity is a blocking element, so de should not inherit generation from root.
{loc: "de", path: "identity/generation", missing: true},
// default resolution
{loc: "root", path: "dates/calendars/calendar", typ: "gregorian"},
// absolute path alias resolution
{loc: "gsw", path: "posix", field: "messages", length: 1},
{loc: "gsw", path: "posix/messages/yesstr", data: "yes:y"},
}
// 27(greMon) - 52(budMon) - 77(chnMon)
func calGetTests(s string) []getTest {
tests := []getTest{
{loc: "de", path: s, length: 2},
{loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='wide']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "format", "wide", 1), data: "11"},
{loc: "de", path: monthVal(s, "format", "wide", 2), data: "22"},
{loc: "de", path: monthVal(s, "format", "wide", 3), data: "Maerz", altData: "bbb"},
{loc: "de", path: monthVal(s, "format", "wide", 4), data: "April"},
{loc: "de", path: monthVal(s, "format", "wide", 5), data: "Mai"},
{loc: "de", path: s + "monthContext[@type='format']/monthWidth[@type='narrow']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "format", "narrow", 1), data: "1"},
{loc: "de", path: monthVal(s, "format", "narrow", 2), data: "2"},
{loc: "de", path: monthVal(s, "format", "narrow", 3), data: "M", altData: "BBB"},
{loc: "de", path: monthVal(s, "format", "narrow", 4), data: "A"},
{loc: "de", path: monthVal(s, "format", "narrow", 5), data: "m"},
{loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='wide']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 1), data: "11"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 2), data: "22"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 3), data: "Maerz", altData: "bbb"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 4), data: "april"},
{loc: "de", path: monthVal(s, "stand-alone", "wide", 5), data: "mai"},
{loc: "de", path: s + "monthContext[@type='stand-alone']/monthWidth[@type='narrow']", field: "month", length: 5},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 1), data: "1"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 2), data: "2"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 3), data: "m"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 4), data: "4"},
{loc: "de", path: monthVal(s, "stand-alone", "narrow", 5), data: "m"},
}
if s == budMon {
for i, t := range tests {
if t.altData != "" {
tests[i].data = t.altData
}
}
}
return tests
}
var getTests = append(rootGetTests,
append(deGetTests,
append(calGetTests(greMon),
append(calGetTests(budMon),
calGetTests(chnMon)...)...)...)...)
func TestPath(t *testing.T) {
d := data()
for i, tt := range getTests {
x, _ := d.LDML(tt.loc)
e, err := walkXPath(x, tt.path)
if err != nil {
if !tt.missing {
t.Errorf("%d:error: %v %v", i, err, tt.missing)
}
continue
}
if tt.missing {
t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing)
continue
}
if tt.data != "" && e.GetCommon().Data() != tt.data {
t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data)
continue
}
if tt.typ != "" && e.GetCommon().Type != tt.typ {
t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ)
continue
}
if tt.field != "" {
slice, _ := findField(reflect.ValueOf(e), tt.field)
if slice.Len() != tt.length {
t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length)
continue
}
}
}
}
func TestGet(t *testing.T) {
d := data()
for i, tt := range getTests {
x, _ := d.LDML(tt.loc)
e, err := Get(x, tt.path)
if err != nil {
if !tt.missing {
t.Errorf("%d:error: %v %v", i, err, tt.missing)
}
continue
}
if tt.missing {
t.Errorf("%d: missing is %v; want %v", i, e == nil, tt.missing)
continue
}
if tt.data != "" && e.GetCommon().Data() != tt.data {
t.Errorf("%d: data is %v; want %v", i, e.GetCommon().Data(), tt.data)
continue
}
if tt.typ != "" && e.GetCommon().Type != tt.typ {
t.Errorf("%d: type is %v; want %v", i, e.GetCommon().Type, tt.typ)
continue
}
if tt.field != "" {
slice, _ := findField(reflect.ValueOf(e), tt.field)
if slice.Len() != tt.length {
t.Errorf("%d: length is %v; want %v", i, slice.Len(), tt.length)
continue
}
}
}
}

144
vendor/golang.org/x/text/unicode/cldr/slice.go generated vendored Normal file
View file

@ -0,0 +1,144 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"fmt"
"reflect"
"sort"
)
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
ptr reflect.Value
typ reflect.Type
}
// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
return s.ptr.Elem()
}
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
ptr := reflect.ValueOf(slicePtr)
if ptr.Kind() != reflect.Ptr {
panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
}
sl := ptr.Elem()
if sl.Kind() != reflect.Slice {
panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
}
intf := reflect.TypeOf((*Elem)(nil)).Elem()
if !sl.Type().Elem().Implements(intf) {
panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
}
nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
reflect.Copy(nsl, sl)
sl.Set(nsl)
return Slice{
ptr: ptr,
typ: sl.Type().Elem().Elem(),
}
}
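// Added usage sketch (mirrors ExampleSlice in examples_test.go):
//
//	cs := x.Collations.Collation
//	s := MakeSlice(&cs)
//	s.Filter(func(e Elem) bool { return e.GetCommon().Type != x.Collations.Default() })
//
// Because MakeSlice replaces the underlying array first, the filtering does
// not alter the data stored in the CLDR tree.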
func (s Slice) indexForAttr(a string) []int {
for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
if n, _ := xmlName(i.field()); n == a {
return i.index
}
}
panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
// Filter filters s to only include elements for which fn returns true.
func (s Slice) Filter(fn func(e Elem) bool) {
k := 0
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
if fn(vi.Interface().(Elem)) {
sl.Index(k).Set(vi)
k++
}
}
sl.Set(sl.Slice(0, k))
}
// Group finds elements in s for which fn returns the same value and groups
// them in a new Slice.
func (s Slice) Group(fn func(e Elem) string) []Slice {
m := make(map[string][]reflect.Value)
sl := s.Value()
for i := 0; i < sl.Len(); i++ {
vi := sl.Index(i)
key := fn(vi.Interface().(Elem))
m[key] = append(m[key], vi)
}
keys := []string{}
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
res := []Slice{}
for _, k := range keys {
nsl := reflect.New(sl.Type())
nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
res = append(res, MakeSlice(nsl.Interface()))
}
return res
}
// SelectAnyOf filters s to contain only elements for which attr matches
// any of the values.
func (s Slice) SelectAnyOf(attr string, values ...string) {
index := s.indexForAttr(attr)
s.Filter(func(e Elem) bool {
vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
return in(values, vf.String())
})
}
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any of
// the values in v.
// If more than one element in a group matches a value in v preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
index := s.indexForAttr(a)
grouped := s.Group(func(e Elem) string { return Key(e, a) })
sl := s.Value()
sl.Set(sl.Slice(0, 0))
for _, g := range grouped {
e := reflect.Value{}
found := len(v)
gsl := g.Value()
for i := 0; i < gsl.Len(); i++ {
vi := gsl.Index(i).Elem().FieldByIndex(index)
j := 0
for ; j < len(v) && v[j] != vi.String(); j++ {
}
if j < found {
found = j
e = gsl.Index(i)
}
}
if found < len(v) {
sl.Set(reflect.Append(sl, e))
}
}
}
// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}

175
vendor/golang.org/x/text/unicode/cldr/slice_test.go generated vendored Normal file
View file

@ -0,0 +1,175 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cldr
import (
"reflect"
"testing"
)
type testSlice []*Common
func mkElem(alt, typ, ref string) *Common {
return &Common{
Type: typ,
Reference: ref,
Alt: alt,
}
}
var (
testSlice1 = testSlice{
mkElem("1", "a", "i.a"),
mkElem("1", "b", "i.b"),
mkElem("1", "c", "i.c"),
mkElem("2", "b", "ii"),
mkElem("3", "c", "iii"),
mkElem("4", "a", "iv.a"),
mkElem("4", "d", "iv.d"),
}
testSliceE = testSlice{}
)
func panics(f func()) (panics bool) {
defer func() {
if err := recover(); err != nil {
panics = true
}
}()
f()
return panics
}
func TestMakeSlice(t *testing.T) {
foo := 1
bar := []int{}
tests := []struct {
i interface{}
panics bool
err string
}{
{&foo, true, "should panic when passed a pointer to the wrong type"},
{&bar, true, "should panic when slice element of the wrong type"},
{testSlice1, true, "should panic when passed a slice"},
{&testSlice1, false, "should not panic"},
}
for i, tt := range tests {
if panics(func() { MakeSlice(tt.i) }) != tt.panics {
t.Errorf("%d: %s", i, tt.err)
}
}
}
var anyOfTests = []struct {
sl testSlice
values []string
n int
}{
{testSliceE, []string{}, 0},
{testSliceE, []string{"1", "2", "3"}, 0},
{testSlice1, []string{}, 0},
{testSlice1, []string{"1"}, 3},
{testSlice1, []string{"2"}, 1},
{testSlice1, []string{"5"}, 0},
{testSlice1, []string{"1", "2", "3"}, 5},
}
func TestSelectAnyOf(t *testing.T) {
for i, tt := range anyOfTests {
sl := tt.sl
s := MakeSlice(&sl)
s.SelectAnyOf("alt", tt.values...)
if len(sl) != tt.n {
t.Errorf("%d: found len == %d; want %d", i, len(sl), tt.n)
}
}
sl := testSlice1
s := MakeSlice(&sl)
if !panics(func() { s.SelectAnyOf("foo") }) {
t.Errorf("should panic on non-existing attribute")
}
}
func TestFilter(t *testing.T) {
for i, tt := range anyOfTests {
sl := tt.sl
s := MakeSlice(&sl)
s.Filter(func(e Elem) bool {
v, _ := findField(reflect.ValueOf(e), "alt")
return in(tt.values, v.String())
})
if len(sl) != tt.n {
t.Errorf("%d: found len == %d; want %d", i, len(sl), tt.n)
}
}
}
func TestGroup(t *testing.T) {
f := func(excl ...string) func(Elem) string {
return func(e Elem) string {
return Key(e, excl...)
}
}
tests := []struct {
sl testSlice
f func(Elem) string
lens []int
}{
{testSliceE, f(), []int{}},
{testSlice1, f(), []int{1, 1, 1, 1, 1, 1, 1}},
{testSlice1, f("type"), []int{3, 1, 1, 2}},
{testSlice1, f("alt"), []int{2, 2, 2, 1}},
{testSlice1, f("alt", "type"), []int{7}},
{testSlice1, f("alt", "type"), []int{7}},
}
for i, tt := range tests {
sl := tt.sl
s := MakeSlice(&sl)
g := s.Group(tt.f)
if len(tt.lens) != len(g) {
t.Errorf("%d: found %d; want %d", i, len(g), len(tt.lens))
continue
}
for j, v := range tt.lens {
if n := g[j].Value().Len(); n != v {
t.Errorf("%d: found %d for length of group %d; want %d", i, n, j, v)
}
}
}
}
func TestSelectOnePerGroup(t *testing.T) {
tests := []struct {
sl testSlice
attr string
values []string
refs []string
}{
{testSliceE, "alt", []string{"1"}, []string{}},
{testSliceE, "type", []string{"a"}, []string{}},
{testSlice1, "alt", []string{"2", "3", "1"}, []string{"i.a", "ii", "iii"}},
{testSlice1, "alt", []string{"1", "4"}, []string{"i.a", "i.b", "i.c", "iv.d"}},
{testSlice1, "type", []string{"c", "d"}, []string{"i.c", "iii", "iv.d"}},
}
for i, tt := range tests {
sl := tt.sl
s := MakeSlice(&sl)
s.SelectOnePerGroup(tt.attr, tt.values)
if len(sl) != len(tt.refs) {
t.Errorf("%d: found result lenght %d; want %d", i, len(sl), len(tt.refs))
continue
}
for j, e := range sl {
if tt.refs[j] != e.Reference {
t.Errorf("%d:%d found %s; want %s", i, j, e.Reference, tt.refs[i])
}
}
}
sl := testSlice1
s := MakeSlice(&sl)
if !panics(func() { s.SelectOnePerGroup("foo", nil) }) {
t.Errorf("should panic on non-existing attribute")
}
}

1456
vendor/golang.org/x/text/unicode/cldr/xml.go generated vendored Normal file

File diff suppressed because it is too large

8
vendor/golang.org/x/text/unicode/doc.go generated vendored Normal file
View file

@ -0,0 +1,8 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// unicode holds packages with implementations of Unicode standards that are
// mostly used as building blocks for other packages in golang.org/x/text,
// layout engines, or are otherwise more low-level in nature.
package unicode

View file

@ -33,17 +33,9 @@ const (
// streamSafe implements the policy of when a CGJ should be inserted.
type streamSafe uint8
// mkStreamSafe is a shorthand for declaring a streamSafe var and calling
// first on it.
func mkStreamSafe(p Properties) streamSafe {
return streamSafe(p.nTrailingNonStarters())
}
// first inserts the first rune of a segment.
// first inserts the first rune of a segment. It is a faster version of next if
// it is known p represents the first rune in a segment.
func (ss *streamSafe) first(p Properties) {
if *ss != 0 {
panic("!= 0")
}
*ss = streamSafe(p.nTrailingNonStarters())
}
@ -66,7 +58,7 @@ func (ss *streamSafe) next(p Properties) ssState {
// be a non-starter. Note that it always holds that if nLead > 0 then
// nLead == nTrail.
if n == 0 {
*ss = 0
*ss = streamSafe(p.nTrailingNonStarters())
return ssStarter
}
return ssSuccess
@ -142,7 +134,6 @@ func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
func (rb *reorderBuffer) reset() {
rb.nrune = 0
rb.nbyte = 0
rb.ss = 0
}
func (rb *reorderBuffer) doFlush() bool {
@ -257,6 +248,9 @@ func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
rb.tmpBytes.setBytes(dcomp)
// As the streamSafe accounting already handles the counting for modifiers,
// we don't have to call next. However, we do need to keep the accounting
// intact when flushing the buffer.
for i := 0; i < len(dcomp); {
info := rb.f.info(rb.tmpBytes, i)
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {

View file

@ -0,0 +1,130 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "testing"
// TestCase is used for most tests.
type TestCase struct {
in []rune
out []rune
}
func runTests(t *testing.T, name string, fm Form, tests []TestCase) {
rb := reorderBuffer{}
rb.init(fm, nil)
for i, test := range tests {
rb.setFlusher(nil, appendFlush)
for j, rune := range test.in {
b := []byte(string(rune))
src := inputBytes(b)
info := rb.f.info(src, 0)
if j == 0 {
rb.ss.first(info)
} else {
rb.ss.next(info)
}
if rb.insertFlush(src, 0, info) < 0 {
t.Errorf("%s:%d: insert failed for rune %d", name, i, j)
}
}
rb.doFlush()
was := string(rb.out)
want := string(test.out)
if len(was) != len(want) {
t.Errorf("%s:%d: length = %d; want %d", name, i, len(was), len(want))
}
if was != want {
k, pfx := pidx(was, want)
t.Errorf("%s:%d: \nwas %s%+q; \nwant %s%+q", name, i, pfx, was[k:], pfx, want[k:])
}
}
}
func TestFlush(t *testing.T) {
const (
hello = "Hello "
world = "world!"
)
buf := make([]byte, maxByteBufferSize)
p := copy(buf, hello)
out := buf[p:]
rb := reorderBuffer{}
rb.initString(NFC, world)
if i := rb.flushCopy(out); i != 0 {
t.Errorf("wrote bytes on flush of empty buffer. (len(out) = %d)", i)
}
for i := range world {
// No need to set streamSafe values for this test.
rb.insertFlush(rb.src, i, rb.f.info(rb.src, i))
n := rb.flushCopy(out)
out = out[n:]
p += n
}
was := buf[:p]
want := hello + world
if string(was) != want {
t.Errorf(`output after flush was "%s"; want "%s"`, string(was), want)
}
if rb.nrune != 0 {
t.Errorf("non-null size of info buffer (rb.nrune == %d)", rb.nrune)
}
if rb.nbyte != 0 {
t.Errorf("non-null size of byte buffer (rb.nbyte == %d)", rb.nbyte)
}
}
var insertTests = []TestCase{
{[]rune{'a'}, []rune{'a'}},
{[]rune{0x300}, []rune{0x300}},
{[]rune{0x300, 0x316}, []rune{0x316, 0x300}}, // CCC(0x300)==230; CCC(0x316)==220
{[]rune{0x316, 0x300}, []rune{0x316, 0x300}},
{[]rune{0x41, 0x316, 0x300}, []rune{0x41, 0x316, 0x300}},
{[]rune{0x41, 0x300, 0x316}, []rune{0x41, 0x316, 0x300}},
{[]rune{0x300, 0x316, 0x41}, []rune{0x316, 0x300, 0x41}},
{[]rune{0x41, 0x300, 0x40, 0x316}, []rune{0x41, 0x300, 0x40, 0x316}},
}
func TestInsert(t *testing.T) {
runTests(t, "TestInsert", NFD, insertTests)
}
var decompositionNFDTest = []TestCase{
{[]rune{0xC0}, []rune{0x41, 0x300}},
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
{[]rune{0x01C4}, []rune{0x01C4}},
{[]rune{0x320E}, []rune{0x320E}},
{[]rune("음ẻ과"), []rune{0x110B, 0x1173, 0x11B7, 0x65, 0x309, 0x1100, 0x116A}},
}
var decompositionNFKDTest = []TestCase{
{[]rune{0xC0}, []rune{0x41, 0x300}},
{[]rune{0xAC00}, []rune{0x1100, 0x1161}},
{[]rune{0x01C4}, []rune{0x44, 0x5A, 0x030C}},
{[]rune{0x320E}, []rune{0x28, 0x1100, 0x1161, 0x29}},
}
func TestDecomposition(t *testing.T) {
runTests(t, "TestDecompositionNFD", NFD, decompositionNFDTest)
runTests(t, "TestDecompositionNFKD", NFKD, decompositionNFKDTest)
}
var compositionTest = []TestCase{
{[]rune{0x41, 0x300}, []rune{0xC0}},
{[]rune{0x41, 0x316}, []rune{0x41, 0x316}},
{[]rune{0x41, 0x300, 0x35D}, []rune{0xC0, 0x35D}},
{[]rune{0x41, 0x316, 0x300}, []rune{0xC0, 0x316}},
// blocking starter
{[]rune{0x41, 0x316, 0x40, 0x300}, []rune{0x41, 0x316, 0x40, 0x300}},
{[]rune{0x1100, 0x1161}, []rune{0xAC00}},
// parenthesized Hangul, alternate between ASCII and Hangul.
{[]rune{0x28, 0x1100, 0x1161, 0x29}, []rune{0x28, 0xAC00, 0x29}},
}
func TestComposition(t *testing.T) {
runTests(t, "TestComposition", NFC, compositionTest)
}

View file

@ -0,0 +1,82 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm_test
import (
"bytes"
"fmt"
"unicode/utf8"
"golang.org/x/text/unicode/norm"
)
// EqualSimple uses a norm.Iter to compare two non-normalized
// strings for equivalence.
func EqualSimple(a, b string) bool {
var ia, ib norm.Iter
ia.InitString(norm.NFKD, a)
ib.InitString(norm.NFKD, b)
for !ia.Done() && !ib.Done() {
if !bytes.Equal(ia.Next(), ib.Next()) {
return false
}
}
return ia.Done() && ib.Done()
}
// FindPrefix finds the longest common prefix of ASCII characters
// of a and b.
func FindPrefix(a, b string) int {
i := 0
for ; i < len(a) && i < len(b) && a[i] < utf8.RuneSelf && a[i] == b[i]; i++ {
}
return i
}
// EqualOpt is like EqualSimple, but optimizes the special
// case for ASCII characters.
func EqualOpt(a, b string) bool {
n := FindPrefix(a, b)
a, b = a[n:], b[n:]
var ia, ib norm.Iter
ia.InitString(norm.NFKD, a)
ib.InitString(norm.NFKD, b)
for !ia.Done() && !ib.Done() {
if !bytes.Equal(ia.Next(), ib.Next()) {
return false
}
if n := int64(FindPrefix(a[ia.Pos():], b[ib.Pos():])); n != 0 {
ia.Seek(n, 1)
ib.Seek(n, 1)
}
}
return ia.Done() && ib.Done()
}
var compareTests = []struct{ a, b string }{
{"aaa", "aaa"},
{"aaa", "aab"},
{"a\u0300a", "\u00E0a"},
{"a\u0300\u0320b", "a\u0320\u0300b"},
{"\u1E0A\u0323", "\x44\u0323\u0307"},
// A character that decomposes into multiple segments
// spans several iterations.
{"\u3304", "\u30A4\u30CB\u30F3\u30AF\u3099"},
}
func ExampleIter() {
for i, t := range compareTests {
r0 := EqualSimple(t.a, t.b)
r1 := EqualOpt(t.a, t.b)
fmt.Printf("%d: %v %v\n", i, r0, r1)
}
// Output:
// 0: true true
// 1: false false
// 2: true true
// 3: true true
// 4: true true
// 5: true true
}

27
vendor/golang.org/x/text/unicode/norm/example_test.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm_test
import (
"fmt"
"golang.org/x/text/unicode/norm"
)
func ExampleForm_NextBoundary() {
s := norm.NFD.String("Mêlée")
for i := 0; i < len(s); {
d := norm.NFC.NextBoundaryInString(s[i:], true)
fmt.Printf("%[1]s: %+[1]q\n", s[i:i+d])
i += d
}
// Output:
// M: "M"
// ê: "e\u0302"
// l: "l"
// é: "e\u0301"
// e: "e"
}

View file

@ -10,7 +10,7 @@ package norm
// and its corresponding decomposing form share the same trie. Each trie maps
// a rune to a uint16. The values take two forms. For v >= 0x8000:
// bits
// 15: 1 (inverse of NFD_QD bit of qcInfo)
// 15: 1 (inverse of NFD_QC bit of qcInfo)
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
// 6..0: ccc (compressed CCC value).
// For v < 0x8000, the respective rune has a decomposition and v is an index
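// A minimal sketch (not part of the original file) showing how a trie value v
// with the layout described above could be unpacked; the helper name and
// signature are illustrative only.
func unpackTrieValue(v uint16) (qcInfo, ccc uint8, decompIndex uint16, hasDecomp bool) {
	if v >= 0x8000 {
		// Bits 13..7 hold qcInfo; bits 6..0 hold the compressed CCC value.
		return uint8(v>>7) & 0x7f, uint8(v) & 0x7f, 0, false
	}
	// For v < 0x8000 the value is an index into the decomposition data.
	return 0, 0, v, true
}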

54
vendor/golang.org/x/text/unicode/norm/forminfo_test.go generated vendored Normal file
View file

@ -0,0 +1,54 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build test
package norm
import "testing"
func TestProperties(t *testing.T) {
var d runeData
CK := [2]string{"C", "K"}
for k, r := 1, rune(0); r < 0x2ffff; r++ {
if k < len(testData) && r == testData[k].r {
d = testData[k]
k++
}
s := string(r)
for j, p := range []Properties{NFC.PropertiesString(s), NFKC.PropertiesString(s)} {
f := d.f[j]
if p.CCC() != d.ccc {
t.Errorf("%U: ccc(%s): was %d; want %d %X", r, CK[j], p.CCC(), d.ccc, p.index)
}
if p.isYesC() != (f.qc == Yes) {
t.Errorf("%U: YesC(%s): was %v; want %v", r, CK[j], p.isYesC(), f.qc == Yes)
}
if p.combinesBackward() != (f.qc == Maybe) {
t.Errorf("%U: combines backwards(%s): was %v; want %v", r, CK[j], p.combinesBackward(), f.qc == Maybe)
}
if p.nLeadingNonStarters() != d.nLead {
t.Errorf("%U: nLead(%s): was %d; want %d %#v %#v", r, CK[j], p.nLeadingNonStarters(), d.nLead, p, d)
}
if p.nTrailingNonStarters() != d.nTrail {
t.Errorf("%U: nTrail(%s): was %d; want %d %#v %#v", r, CK[j], p.nTrailingNonStarters(), d.nTrail, p, d)
}
if p.combinesForward() != f.combinesForward {
t.Errorf("%U: combines forward(%s): was %v; want %v %#v", r, CK[j], p.combinesForward(), f.combinesForward, p)
}
// Skip Hangul as it is algorithmically computed.
if r >= hangulBase && r < hangulEnd {
continue
}
if p.hasDecomposition() {
if has := f.decomposition != ""; !has {
t.Errorf("%U: hasDecomposition(%s): was %v; want %v", r, CK[j], p.hasDecomposition(), has)
}
if string(p.Decomposition()) != f.decomposition {
t.Errorf("%U: decomp(%s): was %+q; want %+q", r, CK[j], p.Decomposition(), f.decomposition)
}
}
}
}
}

View file

@ -90,16 +90,20 @@ func (in *input) charinfoNFKC(p int) (uint16, int) {
}
func (in *input) hangul(p int) (r rune) {
var size int
if in.bytes == nil {
if !isHangulString(in.str[p:]) {
return 0
}
r, _ = utf8.DecodeRuneInString(in.str[p:])
r, size = utf8.DecodeRuneInString(in.str[p:])
} else {
if !isHangul(in.bytes[p:]) {
return 0
}
r, _ = utf8.DecodeRune(in.bytes[p:])
r, size = utf8.DecodeRune(in.bytes[p:])
}
if size != hangulUTF8Size {
return 0
}
return r
}

View file

@ -41,6 +41,7 @@ func (i *Iter) Init(f Form, src []byte) {
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
@ -56,11 +57,12 @@ func (i *Iter) InitString(f Form, src string) {
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a UTF8 rune.
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
@ -84,6 +86,7 @@ func (i *Iter) Seek(offset int64, whence int) (int64, error) {
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
@ -161,6 +164,7 @@ func nextHangul(i *Iter) []byte {
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
@ -204,12 +208,10 @@ func nextMultiNorm(i *Iter) []byte {
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.ss.first(info)
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.ss.next(info)
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
@ -222,9 +224,9 @@ func nextMultiNorm(i *Iter) []byte {
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
ss := mkStreamSafe(i.info)
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
@ -243,6 +245,8 @@ func nextDecomposed(i *Iter) (next []byte) {
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
@ -266,7 +270,7 @@ func nextDecomposed(i *Iter) (next []byte) {
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch ss.next(i.info) {
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
@ -309,7 +313,7 @@ func nextDecomposed(i *Iter) (next []byte) {
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := ss.next(i.info); v == ssStarter {
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
@ -335,10 +339,6 @@ doNorm:
func doNormDecomposed(i *Iter) []byte {
for {
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
@ -348,6 +348,10 @@ func doNormDecomposed(i *Iter) []byte {
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
@ -357,6 +361,7 @@ func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
@ -365,7 +370,6 @@ func nextCGJDecompose(i *Iter) []byte {
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
ss := mkStreamSafe(i.info)
for {
if !i.info.isYesC() {
goto doNorm
@ -385,11 +389,12 @@ func nextComposed(i *Iter) []byte {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := ss.next(i.info); v == ssStarter {
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
@ -401,8 +406,10 @@ func nextComposed(i *Iter) []byte {
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)

98
vendor/golang.org/x/text/unicode/norm/iter_test.go generated vendored Normal file
View file

@ -0,0 +1,98 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"strings"
"testing"
)
func doIterNorm(f Form, s string) []byte {
acc := []byte{}
i := Iter{}
i.InitString(f, s)
for !i.Done() {
acc = append(acc, i.Next()...)
}
return acc
}
func TestIterNext(t *testing.T) {
runNormTests(t, "IterNext", func(f Form, out []byte, s string) []byte {
return doIterNorm(f, string(append(out, s...)))
})
}
type SegmentTest struct {
in string
out []string
}
var segmentTests = []SegmentTest{
{"\u1E0A\u0323a", []string{"\x44\u0323\u0307", "a", ""}},
{rep('a', segSize), append(strings.Split(rep('a', segSize), ""), "")},
{rep('a', segSize+2), append(strings.Split(rep('a', segSize+2), ""), "")},
{rep('a', segSize) + "\u0300aa",
append(strings.Split(rep('a', segSize-1), ""), "a\u0300", "a", "a", "")},
// U+0f73 is NOT treated as a starter as it is a modifier
{"a" + grave(29) + "\u0f73", []string{"a" + grave(29), cgj + "\u0f73"}},
{"a\u0f73", []string{"a\u0f73"}},
// U+ff9e is treated as a non-starter.
// TODO: should we? Note that this will only affect iteration, as whether
// or not we do so does not affect the normalization output and will either
// way result in consistent iteration output.
{"a" + grave(30) + "\uff9e", []string{"a" + grave(30), cgj + "\uff9e"}},
{"a\uff9e", []string{"a\uff9e"}},
}
var segmentTestsK = []SegmentTest{
{"\u3332", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u3099", ""}},
// last segment of multi-segment decomposition needs normalization
{"\u3332\u093C", []string{"\u30D5", "\u30A1", "\u30E9", "\u30C3", "\u30C8\u093C\u3099", ""}},
{"\u320E", []string{"\x28", "\uAC00", "\x29"}},
// last segment should be copied to start of buffer.
{"\ufdfa", []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645", ""}},
{"\ufdfa" + grave(30), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), ""}},
{"\uFDFA" + grave(64), []string{"\u0635", "\u0644", "\u0649", " ", "\u0627", "\u0644", "\u0644", "\u0647", " ", "\u0639", "\u0644", "\u064a", "\u0647", " ", "\u0648", "\u0633", "\u0644", "\u0645" + grave(30), cgj + grave(30), cgj + grave(4), ""}},
// Hangul and Jamo are grouped together.
{"\uAC00", []string{"\u1100\u1161", ""}},
{"\uAC01", []string{"\u1100\u1161\u11A8", ""}},
{"\u1100\u1161", []string{"\u1100\u1161", ""}},
}
// Note that, by design, segmentation is equal for composing and decomposing forms.
func TestIterSegmentation(t *testing.T) {
segmentTest(t, "SegmentTestD", NFD, segmentTests)
segmentTest(t, "SegmentTestC", NFC, segmentTests)
segmentTest(t, "SegmentTestKD", NFKD, segmentTestsK)
segmentTest(t, "SegmentTestKC", NFKC, segmentTestsK)
}
func segmentTest(t *testing.T, name string, f Form, tests []SegmentTest) {
iter := Iter{}
for i, tt := range tests {
iter.InitString(f, tt.in)
for j, seg := range tt.out {
if seg == "" {
if !iter.Done() {
res := string(iter.Next())
t.Errorf(`%s:%d:%d: expected Done()==true, found segment %+q`, name, i, j, res)
}
continue
}
if iter.Done() {
t.Errorf("%s:%d:%d: Done()==true, want false", name, i, j)
}
seg = f.String(seg)
if res := string(iter.Next()); res != seg {
t.Errorf(`%s:%d:%d: segment was %+q (%d); want %+q (%d)`, name, i, j, pc(res), len(res), pc(seg), len(seg))
}
}
}
}

View file

@ -35,12 +35,9 @@ func main() {
computeNonStarterCounts()
verifyComputed()
printChars()
if *test {
testDerived()
printTestdata()
} else {
makeTables()
}
testDerived()
printTestdata()
makeTables()
}
var (
@ -602,6 +599,7 @@ func printCharInfoTables(w io.Writer) int {
}
index := normalDecomp
nTrail := chars[r].nTrailingNonStarters
nLead := chars[r].nLeadingNonStarters
if tccc > 0 || lccc > 0 || nTrail > 0 {
tccc <<= 2
tccc |= nTrail
@ -612,7 +610,7 @@ func printCharInfoTables(w io.Writer) int {
index = firstCCC
}
}
if lccc > 0 {
if lccc > 0 || nLead > 0 {
s += string([]byte{lccc})
if index == firstCCC {
log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r)

14
vendor/golang.org/x/text/unicode/norm/norm_test.go generated vendored Normal file
View file

@ -0,0 +1,14 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm_test
import (
"testing"
)
func TestPlaceHolder(t *testing.T) {
// Does nothing, just allows the Makefile to be canonical
// while waiting for the package itself to be written.
}

View file

@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: the file data_test.go that is generated should not be checked in.
//go:generate go run maketables.go triegen.go
//go:generate go run maketables.go triegen.go -test
//go:generate go test -tags test
// Package norm contains types and functions for normalizing Unicode strings.
package norm
package norm // import "golang.org/x/text/unicode/norm"
import (
"unicode/utf8"
@ -323,7 +324,6 @@ func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool)
// have an overflow for runes that are starters (e.g. with U+FF9E).
switch ss.next(info) {
case ssStarter:
ss.first(info)
lastSegStart = i
case ssOverflow:
return lastSegStart, false
@ -440,6 +440,8 @@ func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
}
return -1
}
// TODO: Using streamSafe to determine the boundary isn't the same as
// using BoundaryBefore. Determine which should be used.
if s := ss.next(info); s != ssSuccess {
return i
}
@ -504,15 +506,14 @@ func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
if info.size == 0 {
return 0
}
if rb.nrune > 0 {
if s := rb.ss.next(info); s == ssStarter {
goto end
} else if s == ssOverflow {
rb.insertCGJ()
if s := rb.ss.next(info); s == ssStarter {
// TODO: this could be removed if we don't support merging.
if rb.nrune > 0 {
goto end
}
} else {
rb.ss.first(info)
} else if s == ssOverflow {
rb.insertCGJ()
goto end
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)

1287
vendor/golang.org/x/text/unicode/norm/normalize_test.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,56 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"bytes"
"fmt"
"testing"
)
var bufSizes = []int{1, 2, 3, 4, 5, 6, 7, 8, 100, 101, 102, 103, 4000, 4001, 4002, 4003}
func readFunc(size int) appendFunc {
return func(f Form, out []byte, s string) []byte {
out = append(out, s...)
r := f.Reader(bytes.NewBuffer(out))
buf := make([]byte, size)
result := []byte{}
for n, err := 0, error(nil); err == nil; {
n, err = r.Read(buf)
result = append(result, buf[:n]...)
}
return result
}
}
func TestReader(t *testing.T) {
for _, s := range bufSizes {
name := fmt.Sprintf("TestReader%d", s)
runNormTests(t, name, readFunc(s))
}
}
func writeFunc(size int) appendFunc {
return func(f Form, out []byte, s string) []byte {
in := append(out, s...)
result := new(bytes.Buffer)
w := f.Writer(result)
buf := make([]byte, size)
for n := 0; len(in) > 0; in = in[n:] {
n = copy(buf, in)
_, _ = w.Write(buf[:n])
}
w.Close()
return result.Bytes()
}
}
func TestWriter(t *testing.T) {
for _, s := range bufSizes {
name := fmt.Sprintf("TestWriter%d", s)
runNormTests(t, name, writeFunc(s))
}
}

File diff suppressed because it is too large

101
vendor/golang.org/x/text/unicode/norm/transform_test.go generated vendored Normal file
View file

@ -0,0 +1,101 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"testing"
"golang.org/x/text/transform"
)
func TestTransform(t *testing.T) {
tests := []struct {
f Form
in, out string
eof bool
dstSize int
err error
}{
{NFC, "ab", "ab", true, 2, nil},
{NFC, "qx", "qx", true, 2, nil},
{NFD, "qx", "qx", true, 2, nil},
{NFC, "", "", true, 1, nil},
{NFD, "", "", true, 1, nil},
{NFC, "", "", false, 1, nil},
{NFD, "", "", false, 1, nil},
// Normalized segment does not fit in destination.
{NFD, "ö", "", true, 1, transform.ErrShortDst},
{NFD, "ö", "", true, 2, transform.ErrShortDst},
// As an artifact of the algorithm, only full segments are written.
// This is not strictly required, and some bytes could be written.
// In practice, for Transform to not block, the destination buffer
// should be at least MaxSegmentSize to work anyway and these edge
// conditions will be relatively rare.
{NFC, "ab", "", true, 1, transform.ErrShortDst},
// This is even true for inert runes.
{NFC, "qx", "", true, 1, transform.ErrShortDst},
{NFC, "a\u0300abc", "\u00e0a", true, 4, transform.ErrShortDst},
// We cannot write a segment if successive runes could still change the result.
{NFD, "ö", "", false, 3, transform.ErrShortSrc},
{NFC, "a\u0300", "", false, 4, transform.ErrShortSrc},
{NFD, "a\u0300", "", false, 4, transform.ErrShortSrc},
{NFC, "ö", "", false, 3, transform.ErrShortSrc},
{NFC, "a\u0300", "", true, 1, transform.ErrShortDst},
// Theoretically could fit, but won't due to simplified checks.
{NFC, "a\u0300", "", true, 2, transform.ErrShortDst},
{NFC, "a\u0300", "", true, 3, transform.ErrShortDst},
{NFC, "a\u0300", "\u00e0", true, 4, nil},
{NFD, "öa\u0300", "o\u0308", false, 8, transform.ErrShortSrc},
{NFD, "öa\u0300ö", "o\u0308a\u0300", true, 8, transform.ErrShortDst},
{NFD, "öa\u0300ö", "o\u0308a\u0300", false, 12, transform.ErrShortSrc},
// Illegal input is copied verbatim.
{NFD, "\xbd\xb2=\xbc ", "\xbd\xb2=\xbc ", true, 8, nil},
}
b := make([]byte, 100)
for i, tt := range tests {
nDst, _, err := tt.f.Transform(b[:tt.dstSize], []byte(tt.in), tt.eof)
out := string(b[:nDst])
if out != tt.out || err != tt.err {
t.Errorf("%d: was %+q (%v); want %+q (%v)", i, out, err, tt.out, tt.err)
}
if want := tt.f.String(tt.in)[:nDst]; want != out {
t.Errorf("%d: incorrect normalization: was %+q; want %+q", i, out, want)
}
}
}
var transBufSizes = []int{
MaxTransformChunkSize,
3 * MaxTransformChunkSize / 2,
2 * MaxTransformChunkSize,
3 * MaxTransformChunkSize,
100 * MaxTransformChunkSize,
}
func doTransNorm(f Form, buf []byte, b []byte) []byte {
acc := []byte{}
for p := 0; p < len(b); {
nd, ns, _ := f.Transform(buf[:], b[p:], true)
p += ns
acc = append(acc, buf[:nd]...)
}
return acc
}
func TestTransformNorm(t *testing.T) {
for _, sz := range transBufSizes {
buf := make([]byte, sz)
runNormTests(t, fmt.Sprintf("Transform:%d", sz), func(f Form, out []byte, s string) []byte {
return doTransNorm(f, buf, append(out, s...))
})
}
}

275
vendor/golang.org/x/text/unicode/norm/ucd_test.go generated vendored Normal file
View file

@ -0,0 +1,275 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"bufio"
"bytes"
"fmt"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"unicode/utf8"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
)
var once sync.Once
func skipShort(t *testing.T) {
testtext.SkipIfNotLong(t)
once.Do(func() { loadTestData(t) })
}
// This regression test runs the test set in NormalizationTest.txt
// (taken from http://www.unicode.org/Public/<unicode.Version>/ucd/).
//
// NormalizationTest.txt has form:
// @Part0 # Specific cases
// #
// 1E0A;1E0A;0044 0307;1E0A;0044 0307; # (Ḋ; Ḋ; D◌̇; Ḋ; D◌̇; ) LATIN CAPITAL LETTER D WITH DOT ABOVE
// 1E0C;1E0C;0044 0323;1E0C;0044 0323; # (Ḍ; Ḍ; D◌̣; Ḍ; D◌̣; ) LATIN CAPITAL LETTER D WITH DOT BELOW
//
// Each test has 5 columns (c1, c2, c3, c4, c5), where
// (c1, c2, c3, c4, c5) == (c1, NFC(c1), NFD(c1), NFKC(c1), NFKD(c1))
//
// CONFORMANCE:
// 1. The following invariants must be true for all conformant implementations
//
// NFC
// c2 == NFC(c1) == NFC(c2) == NFC(c3)
// c4 == NFC(c4) == NFC(c5)
//
// NFD
// c3 == NFD(c1) == NFD(c2) == NFD(c3)
// c5 == NFD(c4) == NFD(c5)
//
// NFKC
// c4 == NFKC(c1) == NFKC(c2) == NFKC(c3) == NFKC(c4) == NFKC(c5)
//
// NFKD
// c5 == NFKD(c1) == NFKD(c2) == NFKD(c3) == NFKD(c4) == NFKD(c5)
//
// 2. For every code point X assigned in this version of Unicode that is not
// specifically listed in Part 1, the following invariants must be true
// for all conformant implementations:
//
// X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X)
//
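// A minimal sketch (not part of the original file) of how the NFC invariants
// above translate into checks for a single decoded row c[0]..c[4];
// doConformanceTests below exercises the full set for all forms.
func checkNFCInvariants(t *testing.T, c [cMaxColumns]string) {
	for _, src := range c[:3] { // c2 == NFC(c1) == NFC(c2) == NFC(c3)
		if got := NFC.String(src); got != c[1] {
			t.Errorf("NFC(%+q) = %+q; want %+q", src, got, c[1])
		}
	}
	for _, src := range c[3:] { // c4 == NFC(c4) == NFC(c5)
		if got := NFC.String(src); got != c[3] {
			t.Errorf("NFC(%+q) = %+q; want %+q", src, got, c[3])
		}
	}
}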
// Column types.
const (
cRaw = iota
cNFC
cNFD
cNFKC
cNFKD
cMaxColumns
)
// Holds data from NormalizationTest.txt
var part []Part
type Part struct {
name string
number int
tests []Test
}
type Test struct {
name string
partnr int
number int
r rune // used for character by character test
cols [cMaxColumns]string // Each has 5 entries, see below.
}
func (t Test) Name() string {
if t.number < 0 {
return part[t.partnr].name
}
return fmt.Sprintf("%s:%d", part[t.partnr].name, t.number)
}
var partRe = regexp.MustCompile(`@Part(\d) # (.*)$`)
var testRe = regexp.MustCompile(`^` + strings.Repeat(`([\dA-F ]+);`, 5) + ` # (.*)$`)
var counter int
// Load the data from NormalizationTest.txt
func loadTestData(t *testing.T) {
f := gen.OpenUCDFile("NormalizationTest.txt")
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
continue
}
m := partRe.FindStringSubmatch(line)
if m != nil {
if len(m) < 3 {
t.Fatal("Failed to parse Part: ", line)
}
i, err := strconv.Atoi(m[1])
if err != nil {
t.Fatal(err)
}
name := m[2]
part = append(part, Part{name: name[:len(name)-1], number: i})
continue
}
m = testRe.FindStringSubmatch(line)
if m == nil || len(m) < 7 {
t.Fatalf(`Failed to parse: "%s" result: %#v`, line, m)
}
test := Test{name: m[6], partnr: len(part) - 1, number: counter}
counter++
for j := 1; j < len(m)-1; j++ {
for _, split := range strings.Split(m[j], " ") {
r, err := strconv.ParseUint(split, 16, 64)
if err != nil {
t.Fatal(err)
}
if test.r == 0 {
// save for CharacterByCharacterTests
test.r = rune(r)
}
var buf [utf8.UTFMax]byte
sz := utf8.EncodeRune(buf[:], rune(r))
test.cols[j-1] += string(buf[:sz])
}
}
part := &part[len(part)-1]
part.tests = append(part.tests, test)
}
if scanner.Err() != nil {
t.Fatal(scanner.Err())
}
}
func cmpResult(t *testing.T, tc *Test, name string, f Form, gold, test, result string) {
if gold != result {
t.Errorf("%s:%s: %s(%+q)=%+q; want %+q: %s",
tc.Name(), name, fstr[f], test, result, gold, tc.name)
}
}
func cmpIsNormal(t *testing.T, tc *Test, name string, f Form, test string, result, want bool) {
if result != want {
t.Errorf("%s:%s: %s(%+q)=%v; want %v", tc.Name(), name, fstr[f], test, result, want)
}
}
func doTest(t *testing.T, tc *Test, f Form, gold, test string) {
testb := []byte(test)
result := f.Bytes(testb)
cmpResult(t, tc, "Bytes", f, gold, test, string(result))
sresult := f.String(test)
cmpResult(t, tc, "String", f, gold, test, sresult)
acc := []byte{}
i := Iter{}
i.InitString(f, test)
for !i.Done() {
acc = append(acc, i.Next()...)
}
cmpResult(t, tc, "Iter.Next", f, gold, test, string(acc))
buf := make([]byte, 128)
acc = nil
for p := 0; p < len(testb); {
nDst, nSrc, _ := f.Transform(buf, testb[p:], true)
acc = append(acc, buf[:nDst]...)
p += nSrc
}
cmpResult(t, tc, "Transform", f, gold, test, string(acc))
for i := range test {
out := f.Append(f.Bytes([]byte(test[:i])), []byte(test[i:])...)
cmpResult(t, tc, fmt.Sprintf(":Append:%d", i), f, gold, test, string(out))
}
cmpIsNormal(t, tc, "IsNormal", f, test, f.IsNormal([]byte(test)), test == gold)
cmpIsNormal(t, tc, "IsNormalString", f, test, f.IsNormalString(test), test == gold)
}
func doConformanceTests(t *testing.T, tc *Test, partn int) {
for i := 0; i <= 2; i++ {
doTest(t, tc, NFC, tc.cols[1], tc.cols[i])
doTest(t, tc, NFD, tc.cols[2], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
}
for i := 3; i <= 4; i++ {
doTest(t, tc, NFC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFD, tc.cols[4], tc.cols[i])
doTest(t, tc, NFKC, tc.cols[3], tc.cols[i])
doTest(t, tc, NFKD, tc.cols[4], tc.cols[i])
}
}
func TestCharacterByCharacter(t *testing.T) {
skipShort(t)
tests := part[1].tests
var last rune = 0
for i := 0; i <= len(tests); i++ { // last one is special case
var r rune
if i == len(tests) {
r = 0x2FA1E // Don't have to go to 0x10FFFF
} else {
r = tests[i].r
}
for last++; last < r; last++ {
// Check all characters that were not explicitly listed in the test.
tc := &Test{partnr: 1, number: -1}
char := string(last)
doTest(t, tc, NFC, char, char)
doTest(t, tc, NFD, char, char)
doTest(t, tc, NFKC, char, char)
doTest(t, tc, NFKD, char, char)
}
if i < len(tests) {
doConformanceTests(t, &tests[i], 1)
}
}
}
func TestStandardTests(t *testing.T) {
skipShort(t)
for _, j := range []int{0, 2, 3} {
for _, test := range part[j].tests {
doConformanceTests(t, &test, j)
}
}
}
// TestPerformance verifies that normalization is O(n). If any of the
// code does not properly check for maxCombiningChars, normalization
// may exhibit O(n**2) behavior.
func TestPerformance(t *testing.T) {
skipShort(t)
runtime.GOMAXPROCS(2)
success := make(chan bool, 1)
go func() {
buf := bytes.Repeat([]byte("\u035D"), 1024*1024)
buf = append(buf, "\u035B"...)
NFC.Append(nil, buf...)
success <- true
}()
timeout := time.After(1 * time.Second)
select {
case <-success:
// test completed before the timeout
case <-timeout:
t.Errorf(`unexpectedly long time to complete PerformanceTest`)
}
}

113
vendor/golang.org/x/text/unicode/rangetable/gen.go generated vendored Normal file
View file

@ -0,0 +1,113 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"reflect"
"sort"
"strings"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
"golang.org/x/text/unicode/rangetable"
)
var versionList = flag.String("versions", "",
"list of versions for which to generate RangeTables")
const bootstrapMessage = `No versions specified.
To bootstrap the code generation, run:
go run gen.go --versions=4.1.0,5.0.0,6.0.0,6.1.0,6.2.0,6.3.0,7.0.0
and ensure that the latest versions are included by checking:
http://www.unicode.org/Public/`
func getVersions() []string {
if *versionList == "" {
log.Fatal(bootstrapMessage)
}
versions := strings.Split(*versionList, ",")
sort.Strings(versions)
// Ensure that at least the current version is included.
for _, v := range versions {
if v == gen.UnicodeVersion() {
return versions
}
}
versions = append(versions, gen.UnicodeVersion())
sort.Strings(versions)
return versions
}
func main() {
gen.Init()
versions := getVersions()
w := &bytes.Buffer{}
fmt.Fprintf(w, "//go:generate go run gen.go --versions=%s\n\n", strings.Join(versions, ","))
fmt.Fprintf(w, "import \"unicode\"\n\n")
vstr := func(s string) string { return strings.Replace(s, ".", "_", -1) }
fmt.Fprintf(w, "var assigned = map[string]*unicode.RangeTable{\n")
for _, v := range versions {
fmt.Fprintf(w, "\t%q: assigned%s,\n", v, vstr(v))
}
fmt.Fprintf(w, "}\n\n")
var size int
for _, v := range versions {
assigned := []rune{}
r := gen.Open("http://www.unicode.org/Public/", "", v+"/ucd/UnicodeData.txt")
ucd.Parse(r, func(p *ucd.Parser) {
assigned = append(assigned, p.Rune(0))
})
rt := rangetable.New(assigned...)
sz := int(reflect.TypeOf(unicode.RangeTable{}).Size())
sz += int(reflect.TypeOf(unicode.Range16{}).Size()) * len(rt.R16)
sz += int(reflect.TypeOf(unicode.Range32{}).Size()) * len(rt.R32)
fmt.Fprintf(w, "// size %d bytes (%d KiB)\n", sz, sz/1024)
fmt.Fprintf(w, "var assigned%s = ", vstr(v))
print(w, rt)
size += sz
}
fmt.Fprintf(w, "// Total size %d bytes (%d KiB)\n", size, size/1024)
gen.WriteGoFile("tables.go", "rangetable", w.Bytes())
}
func print(w io.Writer, rt *unicode.RangeTable) {
fmt.Fprintln(w, "&unicode.RangeTable{")
fmt.Fprintln(w, "\tR16: []unicode.Range16{")
for _, r := range rt.R16 {
fmt.Fprintf(w, "\t\t{%#04x, %#04x, %d},\n", r.Lo, r.Hi, r.Stride)
}
fmt.Fprintln(w, "\t},")
fmt.Fprintln(w, "\tR32: []unicode.Range32{")
for _, r := range rt.R32 {
fmt.Fprintf(w, "\t\t{%#08x, %#08x, %d},\n", r.Lo, r.Hi, r.Stride)
}
fmt.Fprintln(w, "\t},")
fmt.Fprintf(w, "\tLatinOffset: %d,\n", rt.LatinOffset)
fmt.Fprintf(w, "}\n\n")
}

260
vendor/golang.org/x/text/unicode/rangetable/merge.go generated vendored Normal file
View file

@ -0,0 +1,260 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rangetable
import (
"unicode"
)
// atEnd is used to mark a completed iteration.
const atEnd = unicode.MaxRune + 1
// Merge returns a new RangeTable that is the union of the given tables.
// It can also be used to compact user-created RangeTables. The entries in
// R16 and R32 for any given RangeTable should be sorted and non-overlapping.
//
// A lookup in the resulting table can be several times faster than using In
// directly on the ranges. Merge is an expensive operation, however, and only
// makes sense if one intends to use the result for more than a couple of
// hundred lookups.
func Merge(ranges ...*unicode.RangeTable) *unicode.RangeTable {
rt := &unicode.RangeTable{}
if len(ranges) == 0 {
return rt
}
iter := tablesIter(make([]tableIndex, len(ranges)))
for i, t := range ranges {
iter[i] = tableIndex{t, 0, atEnd}
if len(t.R16) > 0 {
iter[i].next = rune(t.R16[0].Lo)
}
}
if r0 := iter.next16(); r0.Stride != 0 {
for {
r1 := iter.next16()
if r1.Stride == 0 {
rt.R16 = append(rt.R16, r0)
break
}
stride := r1.Lo - r0.Hi
if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
// Fully merge the next range into the previous one.
r0.Hi, r0.Stride = r1.Hi, stride
continue
} else if stride == r0.Stride {
// Move the first element of r1 to r0. This may eliminate an
// entry.
r0.Hi = r1.Lo
r0.Stride = stride
r1.Lo = r1.Lo + r1.Stride
if r1.Lo > r1.Hi {
continue
}
}
rt.R16 = append(rt.R16, r0)
r0 = r1
}
}
for i, t := range ranges {
iter[i] = tableIndex{t, 0, atEnd}
if len(t.R32) > 0 {
iter[i].next = rune(t.R32[0].Lo)
}
}
if r0 := iter.next32(); r0.Stride != 0 {
for {
r1 := iter.next32()
if r1.Stride == 0 {
rt.R32 = append(rt.R32, r0)
break
}
stride := r1.Lo - r0.Hi
if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) {
// Fully merge the next range into the previous one.
r0.Hi, r0.Stride = r1.Hi, stride
continue
} else if stride == r0.Stride {
// Move the first element of r1 to r0. This may eliminate an
// entry.
r0.Hi = r1.Lo
r1.Lo = r1.Lo + r1.Stride
if r1.Lo > r1.Hi {
continue
}
}
rt.R32 = append(rt.R32, r0)
r0 = r1
}
}
for i := 0; i < len(rt.R16) && rt.R16[i].Hi <= unicode.MaxLatin1; i++ {
rt.LatinOffset = i + 1
}
return rt
}
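// Usage sketch (not part of the original file): merge a few standard tables
// once, then reuse the result for repeated membership tests.
func exampleMergeUsage(r rune) bool {
	cased := Merge(unicode.Lower, unicode.Upper, unicode.Title)
	return unicode.Is(cased, r)
}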
type tableIndex struct {
t *unicode.RangeTable
p uint32
next rune
}
type tablesIter []tableIndex
// sortIter does an insertion sort using the next field of tableIndex. Insertion
// sort is a good sorting algorithm for this case.
func sortIter(t []tableIndex) {
for i := range t {
for j := i; j > 0 && t[j-1].next > t[j].next; j-- {
t[j], t[j-1] = t[j-1], t[j]
}
}
}
// next16 finds the range to be added to the table. If ranges overlap between
// multiple tables it clips the result to a non-overlapping range if the
// elements are not fully subsumed. It returns a zero range if there are no more
// ranges.
func (ti tablesIter) next16() unicode.Range16 {
sortIter(ti)
t0 := ti[0]
if t0.next == atEnd {
return unicode.Range16{}
}
r0 := t0.t.R16[t0.p]
r0.Lo = uint16(t0.next)
// We restrict the Hi of the current range if it overlaps with another range.
for i := range ti {
tn := ti[i]
// Since our tableIndices are sorted by next, we can break if there
// is no overlap. The first value of a next range can always be merged
// into the current one, so we can break in case of equality as well.
if rune(r0.Hi) <= tn.next {
break
}
rn := tn.t.R16[tn.p]
rn.Lo = uint16(tn.next)
// Limit r0.Hi based on next ranges in list, but allow it to overlap
// with ranges as long as it subsumes it.
m := (rn.Lo - r0.Lo) % r0.Stride
if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
// Overlap, take the min of the two Hi values: for simplicity's sake
// we only process one range at a time.
if r0.Hi > rn.Hi {
r0.Hi = rn.Hi
}
} else {
// Not a compatible stride. Set to the last possible value before
// rn.Lo, but ensure there is at least one value.
if x := rn.Lo - m; r0.Lo <= x {
r0.Hi = x
}
break
}
}
// Update the next values for each table.
for i := range ti {
tn := &ti[i]
if rune(r0.Hi) < tn.next {
break
}
rn := tn.t.R16[tn.p]
stride := rune(rn.Stride)
tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
if rune(rn.Hi) < tn.next {
if tn.p++; int(tn.p) == len(tn.t.R16) {
tn.next = atEnd
} else {
tn.next = rune(tn.t.R16[tn.p].Lo)
}
}
}
if r0.Lo == r0.Hi {
r0.Stride = 1
}
return r0
}
// next32 finds the range to be added to the table. If ranges overlap between
// multiple tables it clips the result to a non-overlapping range if the
// elements are not fully subsumed. It returns a zero range if there are no more
// ranges.
func (ti tablesIter) next32() unicode.Range32 {
sortIter(ti)
t0 := ti[0]
if t0.next == atEnd {
return unicode.Range32{}
}
r0 := t0.t.R32[t0.p]
r0.Lo = uint32(t0.next)
// We restrict the Hi of the current range if it overlaps with another range.
for i := range ti {
tn := ti[i]
// Since our tableIndices are sorted by next, we can break if there
// is no overlap. The first value of a next range can always be merged
// into the current one, so we can break in case of equality as well.
if rune(r0.Hi) <= tn.next {
break
}
rn := tn.t.R32[tn.p]
rn.Lo = uint32(tn.next)
// Limit r0.Hi based on next ranges in list, but allow it to overlap
// with ranges as long as it subsumes it.
m := (rn.Lo - r0.Lo) % r0.Stride
if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) {
// Overlap, take the min of the two Hi values: for simplicity's sake
// we only process one range at a time.
if r0.Hi > rn.Hi {
r0.Hi = rn.Hi
}
} else {
// Not a compatible stride. Set to the last possible value before
// rn.Lo, but ensure there is at least one value.
if x := rn.Lo - m; r0.Lo <= x {
r0.Hi = x
}
break
}
}
// Update the next values for each table.
for i := range ti {
tn := &ti[i]
if rune(r0.Hi) < tn.next {
break
}
rn := tn.t.R32[tn.p]
stride := rune(rn.Stride)
tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride))
if rune(rn.Hi) < tn.next {
if tn.p++; int(tn.p) == len(tn.t.R32) {
tn.next = atEnd
} else {
tn.next = rune(tn.t.R32[tn.p].Lo)
}
}
}
if r0.Lo == r0.Hi {
r0.Stride = 1
}
return r0
}

View file

@ -0,0 +1,184 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rangetable
import (
"testing"
"unicode"
)
var (
maxRuneTable = &unicode.RangeTable{
R32: []unicode.Range32{
{unicode.MaxRune, unicode.MaxRune, 1},
},
}
overlap1 = &unicode.RangeTable{
R16: []unicode.Range16{
{0x100, 0xfffc, 4},
},
R32: []unicode.Range32{
{0x100000, 0x10fffc, 4},
},
}
overlap2 = &unicode.RangeTable{
R16: []unicode.Range16{
{0x101, 0xfffd, 4},
},
R32: []unicode.Range32{
{0x100001, 0x10fffd, 3},
},
}
// The following table should be compacted into two entries for R16 and R32.
optimize = &unicode.RangeTable{
R16: []unicode.Range16{
{0x1, 0x1, 1},
{0x2, 0x2, 1},
{0x3, 0x3, 1},
{0x5, 0x5, 1},
{0x7, 0x7, 1},
{0x9, 0x9, 1},
{0xb, 0xf, 2},
},
R32: []unicode.Range32{
{0x10001, 0x10001, 1},
{0x10002, 0x10002, 1},
{0x10003, 0x10003, 1},
{0x10005, 0x10005, 1},
{0x10007, 0x10007, 1},
{0x10009, 0x10009, 1},
{0x1000b, 0x1000f, 2},
},
}
)
func TestMerge(t *testing.T) {
for i, tt := range [][]*unicode.RangeTable{
{unicode.Cc, unicode.Cf},
{unicode.L, unicode.Ll},
{unicode.L, unicode.Ll, unicode.Lu},
{unicode.Ll, unicode.Lu},
{unicode.M},
unicode.GraphicRanges,
cased,
// Merge R16 only and R32 only and vice versa.
{unicode.Khmer, unicode.Khudawadi},
{unicode.Imperial_Aramaic, unicode.Radical},
// Merge with empty.
{&unicode.RangeTable{}},
{&unicode.RangeTable{}, &unicode.RangeTable{}},
{&unicode.RangeTable{}, &unicode.RangeTable{}, &unicode.RangeTable{}},
{&unicode.RangeTable{}, unicode.Hiragana},
{unicode.Inherited, &unicode.RangeTable{}},
{&unicode.RangeTable{}, unicode.Hanunoo, &unicode.RangeTable{}},
// Hypothetical tables.
{maxRuneTable},
{overlap1, overlap2},
// Optimization
{optimize},
} {
rt := Merge(tt...)
for r := rune(0); r <= unicode.MaxRune; r++ {
if got, want := unicode.Is(rt, r), unicode.In(r, tt...); got != want {
t.Fatalf("%d:%U: got %v; want %v", i, r, got, want)
}
}
// Test optimization and correctness for R16.
for k := 0; k < len(rt.R16)-1; k++ {
if lo, hi := rt.R16[k].Lo, rt.R16[k].Hi; lo > hi {
t.Errorf("%d: Lo (%x) > Hi (%x)", i, lo, hi)
}
if hi, lo := rt.R16[k].Hi, rt.R16[k+1].Lo; hi >= lo {
t.Errorf("%d: Hi (%x) >= next Lo (%x)", i, hi, lo)
}
if rt.R16[k].Hi+rt.R16[k].Stride == rt.R16[k+1].Lo {
t.Errorf("%d: missed optimization for R16 at %d between %X and %x",
i, k, rt.R16[k], rt.R16[k+1])
}
}
// Test optimization and correctness for R32.
for k := 0; k < len(rt.R32)-1; k++ {
if lo, hi := rt.R32[k].Lo, rt.R32[k].Hi; lo > hi {
t.Errorf("%d: Lo (%x) > Hi (%x)", i, lo, hi)
}
if hi, lo := rt.R32[k].Hi, rt.R32[k+1].Lo; hi >= lo {
t.Errorf("%d: Hi (%x) >= next Lo (%x)", i, hi, lo)
}
if rt.R32[k].Hi+rt.R32[k].Stride == rt.R32[k+1].Lo {
t.Errorf("%d: missed optimization for R32 at %d between %X and %X",
i, k, rt.R32[k], rt.R32[k+1])
}
}
}
}
const runes = "Hello World in 2015!,\U0010fffd"
func BenchmarkNotMerged(t *testing.B) {
for i := 0; i < t.N; i++ {
for _, r := range runes {
unicode.In(r, unicode.GraphicRanges...)
}
}
}
func BenchmarkMerged(t *testing.B) {
rt := Merge(unicode.GraphicRanges...)
for i := 0; i < t.N; i++ {
for _, r := range runes {
unicode.Is(rt, r)
}
}
}
var cased = []*unicode.RangeTable{
unicode.Lower,
unicode.Upper,
unicode.Title,
unicode.Other_Lowercase,
unicode.Other_Uppercase,
}
func BenchmarkNotMergedCased(t *testing.B) {
for i := 0; i < t.N; i++ {
for _, r := range runes {
unicode.In(r, cased...)
}
}
}
func BenchmarkMergedCased(t *testing.B) {
// This reduces len(R16) from 243 to 82 and len(R32) from 65 to 35 for
// Unicode 7.0.0.
rt := Merge(cased...)
for i := 0; i < t.N; i++ {
for _, r := range runes {
unicode.Is(rt, r)
}
}
}
func BenchmarkInit(t *testing.B) {
for i := 0; i < t.N; i++ {
Merge(cased...)
Merge(unicode.GraphicRanges...)
}
}
func BenchmarkInit2(t *testing.B) {
// Hypothetical near-worst-case performance.
for i := 0; i < t.N; i++ {
Merge(overlap1, overlap2)
}
}

View file

@ -0,0 +1,70 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package rangetable provides utilities for creating and inspecting
// unicode.RangeTables.
package rangetable
import (
"sort"
"unicode"
)
// New creates a RangeTable from the given runes, which may contain duplicates.
func New(r ...rune) *unicode.RangeTable {
if len(r) == 0 {
return &unicode.RangeTable{}
}
sort.Sort(byRune(r))
// Remove duplicates.
k := 1
for i := 1; i < len(r); i++ {
if r[k-1] != r[i] {
r[k] = r[i]
k++
}
}
var rt unicode.RangeTable
for _, r := range r[:k] {
if r <= 0xFFFF {
rt.R16 = append(rt.R16, unicode.Range16{Lo: uint16(r), Hi: uint16(r), Stride: 1})
} else {
rt.R32 = append(rt.R32, unicode.Range32{Lo: uint32(r), Hi: uint32(r), Stride: 1})
}
}
// Optimize RangeTable.
return Merge(&rt)
}
type byRune []rune
func (r byRune) Len() int { return len(r) }
func (r byRune) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byRune) Less(i, j int) bool { return r[i] < r[j] }
// Visit visits all runes in the given RangeTable in order, calling fn for each.
func Visit(rt *unicode.RangeTable, fn func(rune)) {
for _, r16 := range rt.R16 {
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
fn(r)
}
}
for _, r32 := range rt.R32 {
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
fn(r)
}
}
}
// Assigned returns a RangeTable with all assigned code points for a given
// Unicode version. This includes graphic, format, control, and private-use
// characters. It returns nil if the data for the given version is not
// available.
func Assigned(version string) *unicode.RangeTable {
return assigned[version]
}
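// Usage sketch (not part of the original file): build a table from a handful
// of runes, possibly with duplicates, and walk it back in order with Visit.
func exampleNewVisit() []rune {
	rt := New('z', 'a', 'b', 'a')
	var out []rune
	Visit(rt, func(r rune) { out = append(out, r) })
	return out // 'a', 'b', 'z'
}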

View file

@ -0,0 +1,55 @@
package rangetable
import (
"reflect"
"testing"
"unicode"
)
var (
empty = &unicode.RangeTable{}
many = &unicode.RangeTable{
R16: []unicode.Range16{{0, 0xffff, 5}},
R32: []unicode.Range32{{0x10004, 0x10009, 5}},
LatinOffset: 0,
}
)
func TestVisit(t *testing.T) {
Visit(empty, func(got rune) {
t.Error("call from empty RangeTable")
})
var want rune
Visit(many, func(got rune) {
if got != want {
t.Errorf("got %U; want %U", got, want)
}
want += 5
})
if want -= 5; want != 0x10009 {
t.Errorf("last run was %U; want U+10009", want)
}
}
func TestNew(t *testing.T) {
for i, rt := range []*unicode.RangeTable{
empty,
unicode.Co,
unicode.Letter,
unicode.ASCII_Hex_Digit,
many,
maxRuneTable,
} {
var got, want []rune
Visit(rt, func(r rune) {
want = append(want, r)
})
Visit(New(want...), func(r rune) {
got = append(got, r)
})
if !reflect.DeepEqual(got, want) {
t.Errorf("%d:\ngot %v;\nwant %v", i, got, want)
}
}
}

5735
vendor/golang.org/x/text/unicode/rangetable/tables.go generated vendored Normal file

File diff suppressed because it is too large

59
vendor/golang.org/x/text/unicode/runenames/bits.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
package runenames
// This file contains code common to gen.go and the package code.
// The mapping from rune to string (i.e. offset and length in the data string)
// is encoded as a two level table. The first level maps from contiguous rune
// ranges [runeOffset, runeOffset+runeLength) to entries. Entries are either
// direct (for repeated names such as "<CJK Ideograph>") or indirect (for runs
// of unique names such as "SPACE", "EXCLAMATION MARK", "QUOTATION MARK", ...).
//
// Each first level table element is 64 bits. The runeOffset (21 bits) and
// runeLength (16 bits) take the 37 high bits. The entry takes the 27 low bits,
// with directness encoded in the least significant bit.
//
// A direct entry encodes a dataOffset (18 bits) and dataLength (8 bits) in the
// data string. 18 bits is too short to encode the entire data string's length,
// but the data string's contents are arranged so that all of the few direct
// entries' offsets come before all of the many indirect entries' offsets.
//
// An indirect entry encodes a dataBase (10 bits) and a table1Offset (16 bits).
// The table1Offset is the start of a range in the second level table. The
// length of that range is the same as the runeLength.
//
// Each second level table element is 16 bits, an index into data, relative to
// a bias equal to (dataBase << dataBaseUnit). That (bias + index) is the
// (dataOffset + dataLength) in the data string. The dataOffset is implied by
// the previous table element (with the same implicit bias).
const (
bitsRuneOffset = 21
bitsRuneLength = 16
bitsDataOffset = 18
bitsDataLength = 8
bitsDirect = 1
bitsDataBase = 10
bitsTable1Offset = 16
shiftRuneOffset = 0 + bitsDirect + bitsDataLength + bitsDataOffset + bitsRuneLength
shiftRuneLength = 0 + bitsDirect + bitsDataLength + bitsDataOffset
shiftDataOffset = 0 + bitsDirect + bitsDataLength
shiftDataLength = 0 + bitsDirect
shiftDirect = 0
shiftDataBase = 0 + bitsDirect + bitsTable1Offset
shiftTable1Offset = 0 + bitsDirect
maskRuneLength = 1<<bitsRuneLength - 1
maskDataOffset = 1<<bitsDataOffset - 1
maskDataLength = 1<<bitsDataLength - 1
maskDirect = 1<<bitsDirect - 1
maskDataBase = 1<<bitsDataBase - 1
maskTable1Offset = 1<<bitsTable1Offset - 1
dataBaseUnit = 10
)
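// Illustrative sketch (not part of the original file): unpacking the common
// fields of a first-level entry with the constants above, mirroring the
// decoding performed by this package's Name function.
func unpackTable0Entry(e uint64) (rOffset, rLength rune, direct bool) {
	rOffset = rune(e >> shiftRuneOffset)
	rLength = rune(e>>shiftRuneLength) & maskRuneLength
	direct = (e>>shiftDirect)&maskDirect != 0
	return rOffset, rLength, direct
}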

View file

@ -0,0 +1,118 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runenames_test
import (
"fmt"
"golang.org/x/text/unicode/runenames"
)
func Example() {
runes := []rune{
-1,
'\U00000000',
'\U0000001f',
'\U00000020',
'\U00000021',
'\U00000041',
'\U0000007e',
'\U0000007f',
'\U00000080',
'\U000000e0',
'\U0000037f',
'\U00000380',
'\U00000381',
'\U00000382',
'\U00000383',
'\U00000384',
'\U00000385',
'\U00000386',
'\U000007c0',
'\U00002603',
'\U000033ff',
'\U00003400',
'\U00003401',
'\U00003402',
'\U00004dc0',
'\U00009fd5',
'\U00009fd6',
'\U00009fff',
'\U0000a000',
0xdc00, // '\U0000dc00' (Low Surrogate) is an invalid Go literal.
'\U0000f800',
'\U0000fffc',
'\U0000fffd',
'\U0000fffe',
'\U0000ffff',
'\U00010000',
'\U0001f574',
'\U0002fa1d',
'\U0002fa1e',
'\U000e0100',
'\U000e01ef',
'\U000e01f0',
'\U00100000',
'\U0010fffd',
'\U0010fffe',
'\U0010ffff',
}
for _, r := range runes {
fmt.Printf("%08x %q\n", r, runenames.Name(r))
}
// Output:
// -0000001 ""
// 00000000 "<control>"
// 0000001f "<control>"
// 00000020 "SPACE"
// 00000021 "EXCLAMATION MARK"
// 00000041 "LATIN CAPITAL LETTER A"
// 0000007e "TILDE"
// 0000007f "<control>"
// 00000080 "<control>"
// 000000e0 "LATIN SMALL LETTER A WITH GRAVE"
// 0000037f "GREEK CAPITAL LETTER YOT"
// 00000380 ""
// 00000381 ""
// 00000382 ""
// 00000383 ""
// 00000384 "GREEK TONOS"
// 00000385 "GREEK DIALYTIKA TONOS"
// 00000386 "GREEK CAPITAL LETTER ALPHA WITH TONOS"
// 000007c0 "NKO DIGIT ZERO"
// 00002603 "SNOWMAN"
// 000033ff "SQUARE GAL"
// 00003400 "<CJK Ideograph Extension A>"
// 00003401 "<CJK Ideograph Extension A>"
// 00003402 "<CJK Ideograph Extension A>"
// 00004dc0 "HEXAGRAM FOR THE CREATIVE HEAVEN"
// 00009fd5 "<CJK Ideograph>"
// 00009fd6 ""
// 00009fff ""
// 0000a000 "YI SYLLABLE IT"
// 0000dc00 "<Low Surrogate>"
// 0000f800 "<Private Use>"
// 0000fffc "OBJECT REPLACEMENT CHARACTER"
// 0000fffd "REPLACEMENT CHARACTER"
// 0000fffe ""
// 0000ffff ""
// 00010000 "LINEAR B SYLLABLE B008 A"
// 0001f574 "MAN IN BUSINESS SUIT LEVITATING"
// 0002fa1d "CJK COMPATIBILITY IDEOGRAPH-2FA1D"
// 0002fa1e ""
// 000e0100 "VARIATION SELECTOR-17"
// 000e01ef "VARIATION SELECTOR-256"
// 000e01f0 ""
// 00100000 "<Plane 16 Private Use>"
// 0010fffd "<Plane 16 Private Use>"
// 0010fffe ""
// 0010ffff ""
}

195
vendor/golang.org/x/text/unicode/runenames/gen.go generated vendored Normal file
View file

@ -0,0 +1,195 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
import (
"log"
"strings"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/ucd"
)
// snippet is a slice of data; data is the concatenation of all of the names.
type snippet struct {
offset int
length int
s string
}
func makeTable0EntryDirect(rOffset, rLength, dOffset, dLength int) uint64 {
if rOffset >= 1<<bitsRuneOffset {
log.Fatalf("makeTable0EntryDirect: rOffset %d is too large", rOffset)
}
if rLength >= 1<<bitsRuneLength {
log.Fatalf("makeTable0EntryDirect: rLength %d is too large", rLength)
}
if dOffset >= 1<<bitsDataOffset {
log.Fatalf("makeTable0EntryDirect: dOffset %d is too large", dOffset)
}
if dLength >= 1<<bitsDataLength {
log.Fatalf("makeTable0EntryDirect: dLength %d is too large", dLength)
}
return uint64(rOffset)<<shiftRuneOffset |
uint64(rLength)<<shiftRuneLength |
uint64(dOffset)<<shiftDataOffset |
uint64(dLength)<<shiftDataLength |
1 // Direct bit.
}
func makeTable0EntryIndirect(rOffset, rLength, dBase, t1Offset int) uint64 {
if rOffset >= 1<<bitsRuneOffset {
log.Fatalf("makeTable0EntryIndirect: rOffset %d is too large", rOffset)
}
if rLength >= 1<<bitsRuneLength {
log.Fatalf("makeTable0EntryIndirect: rLength %d is too large", rLength)
}
if dBase >= 1<<bitsDataBase {
log.Fatalf("makeTable0EntryIndirect: dBase %d is too large", dBase)
}
if t1Offset >= 1<<bitsTable1Offset {
log.Fatalf("makeTable0EntryIndirect: t1Offset %d is too large", t1Offset)
}
return uint64(rOffset)<<shiftRuneOffset |
uint64(rLength)<<shiftRuneLength |
uint64(dBase)<<shiftDataBase |
uint64(t1Offset)<<shiftTable1Offset |
0 // Direct bit left unset: indirect entry.
}
func makeTable1Entry(x int) uint16 {
if x < 0 || 0xffff < x {
log.Fatalf("makeTable1Entry: entry %d is out of range", x)
}
return uint16(x)
}
var (
data []byte
snippets = make([]snippet, 1+unicode.MaxRune)
)
func main() {
gen.Init()
names, counts := parse()
appendRepeatNames(names, counts)
appendUniqueNames(names, counts)
table0, table1 := makeTables()
gen.Repackage("gen_bits.go", "bits.go", "runenames")
w := gen.NewCodeWriter()
w.WriteVar("table0", table0)
w.WriteVar("table1", table1)
w.WriteConst("data", string(data))
w.WriteGoFile("tables.go", "runenames")
}
func parse() (names []string, counts map[string]int) {
names = make([]string, 1+unicode.MaxRune)
counts = map[string]int{}
ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
r, s := p.Rune(0), p.String(ucd.Name)
if s == "" {
return
}
if s[0] == '<' {
const first = ", First>"
if i := strings.Index(s, first); i >= 0 {
s = s[:i] + ">"
}
}
names[r] = s
counts[s]++
})
return names, counts
}
func appendRepeatNames(names []string, counts map[string]int) {
alreadySeen := map[string]snippet{}
for r, s := range names {
if s == "" || counts[s] == 1 {
continue
}
if s[0] != '<' {
log.Fatalf("Repeated name %q does not start with a '<'", s)
}
if z, ok := alreadySeen[s]; ok {
snippets[r] = z
continue
}
z := snippet{
offset: len(data),
length: len(s),
s: s,
}
data = append(data, s...)
snippets[r] = z
alreadySeen[s] = z
}
}
func appendUniqueNames(names []string, counts map[string]int) {
for r, s := range names {
if s == "" || counts[s] != 1 {
continue
}
if s[0] == '<' {
log.Fatalf("Unique name %q starts with a '<'", s)
}
z := snippet{
offset: len(data),
length: len(s),
s: s,
}
data = append(data, s...)
snippets[r] = z
}
}
func makeTables() (table0 []uint64, table1 []uint16) {
for i := 0; i < len(snippets); {
zi := snippets[i]
if zi == (snippet{}) {
i++
continue
}
// Look for repeat names. If we have one, we only need a table0 entry.
j := i + 1
for ; j < len(snippets) && zi == snippets[j]; j++ {
}
if j > i+1 {
table0 = append(table0, makeTable0EntryDirect(i, j-i, zi.offset, zi.length))
i = j
continue
}
// Otherwise, we have a run of unique names. We need one table0 entry
// and two or more table1 entries.
base := zi.offset &^ (1<<dataBaseUnit - 1)
t1Offset := len(table1) + 1
table1 = append(table1, makeTable1Entry(zi.offset-base))
table1 = append(table1, makeTable1Entry(zi.offset+zi.length-base))
for ; j < len(snippets) && snippets[j] != (snippet{}); j++ {
zj := snippets[j]
if data[zj.offset] == '<' {
break
}
table1 = append(table1, makeTable1Entry(zj.offset+zj.length-base))
}
table0 = append(table0, makeTable0EntryIndirect(i, j-i, base>>dataBaseUnit, t1Offset))
i = j
}
return table0, table1
}

63
vendor/golang.org/x/text/unicode/runenames/gen_bits.go generated vendored Normal file
View file

@ -0,0 +1,63 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
package main
// This file contains code common to gen.go and the package code.
// The mapping from rune to string (i.e. offset and length in the data string)
// is encoded as a two level table. The first level maps from contiguous rune
// ranges [runeOffset, runeOffset+runeLength) to entries. Entries are either
// direct (for repeated names such as "<CJK Ideograph>") or indirect (for runs
// of unique names such as "SPACE", "EXCLAMATION MARK", "QUOTATION MARK", ...).
//
// Each first level table element is 64 bits. The runeOffset (21 bits) and
// runeLength (16 bits) take the 37 high bits. The entry takes the 27 low bits,
// with directness encoded in the least significant bit.
//
// A direct entry encodes a dataOffset (18 bits) and dataLength (8 bits) in the
// data string. 18 bits is too short to encode the entire data string's length,
// but the data string's contents are arranged so that all of the few direct
// entries' offsets come before all of the many indirect entries' offsets.
//
// An indirect entry encodes a dataBase (10 bits) and a table1Offset (16 bits).
// The table1Offset is the start of a range in the second level table. The
// length of that range is the same as the runeLength.
//
// Each second level table element is 16 bits, an index into data, relative to
// a bias equal to (dataBase << dataBaseUnit). That (bias + index) is the
// (dataOffset + dataLength) in the data string. The dataOffset is implied by
// the previous table element (with the same implicit bias).
const (
bitsRuneOffset = 21
bitsRuneLength = 16
bitsDataOffset = 18
bitsDataLength = 8
bitsDirect = 1
bitsDataBase = 10
bitsTable1Offset = 16
shiftRuneOffset = 0 + bitsDirect + bitsDataLength + bitsDataOffset + bitsRuneLength
shiftRuneLength = 0 + bitsDirect + bitsDataLength + bitsDataOffset
shiftDataOffset = 0 + bitsDirect + bitsDataLength
shiftDataLength = 0 + bitsDirect
shiftDirect = 0
shiftDataBase = 0 + bitsDirect + bitsTable1Offset
shiftTable1Offset = 0 + bitsDirect
maskRuneLength = 1<<bitsRuneLength - 1
maskDataOffset = 1<<bitsDataOffset - 1
maskDataLength = 1<<bitsDataLength - 1
maskDirect = 1<<bitsDirect - 1
maskDataBase = 1<<bitsDataBase - 1
maskTable1Offset = 1<<bitsTable1Offset - 1
dataBaseUnit = 10
)
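As a quick cross-check of the layout described above, the field widths sum to 64 bits for both entry kinds, and an indirect entry's dataBase counts in units of 1<<dataBaseUnit bytes of the data string. A small illustrative snippet, assuming these constants and an fmt import (not part of the vendored file):
func layoutCheck() {
	direct := bitsRuneOffset + bitsRuneLength + bitsDataOffset + bitsDataLength + bitsDirect
	indirect := bitsRuneOffset + bitsRuneLength + bitsDataBase + bitsTable1Offset + bitsDirect
	fmt.Println(direct, indirect) // 64 64
	// A dataBase of 3 biases the second level indices by 3 << dataBaseUnit,
	// i.e. 3072 bytes into the data string.
	fmt.Println(3 << dataBaseUnit) // 3072
}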

48
vendor/golang.org/x/text/unicode/runenames/runenames.go generated vendored Normal file
View file

@@ -0,0 +1,48 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go gen_bits.go
// Package runenames provides rune names from the Unicode Character Database.
// For example, the name for '\u0100' is "LATIN CAPITAL LETTER A WITH MACRON".
//
// See http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
package runenames
import (
"sort"
)
// Name returns the name for r.
func Name(r rune) string {
i := sort.Search(len(table0), func(j int) bool {
e := table0[j]
rOffset := rune(e >> shiftRuneOffset)
return r < rOffset
})
if i == 0 {
return ""
}
e := table0[i-1]
rOffset := rune(e >> shiftRuneOffset)
rLength := rune(e>>shiftRuneLength) & maskRuneLength
if r >= rOffset+rLength {
return ""
}
if (e>>shiftDirect)&maskDirect != 0 {
o := int(e>>shiftDataOffset) & maskDataOffset
n := int(e>>shiftDataLength) & maskDataLength
return data[o : o+n]
}
base := uint32(e>>shiftDataBase) & maskDataBase
base <<= dataBaseUnit
j := rune(e>>shiftTable1Offset) & maskTable1Offset
j += r - rOffset
d0 := base + uint32(table1[j-1]) // dataOffset
d1 := base + uint32(table1[j-0]) // dataOffset + dataLength
return data[d0:d1]
}
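A minimal usage sketch for the lookup above. The name for U+0100 is the one stated in the package comment, SPACE is among the unique names mentioned in gen_bits.go, and runes with no name in UnicodeData.txt simply return the empty string; this assumes the generated tables.go is present.
package main
import (
	"fmt"
	"golang.org/x/text/unicode/runenames"
)
func main() {
	fmt.Println(runenames.Name('\u0100')) // LATIN CAPITAL LETTER A WITH MACRON
	fmt.Println(runenames.Name(' '))      // SPACE
	fmt.Println(runenames.Name(0x10FFFF)) // "" (no name in UnicodeData.txt)
}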

46
vendor/golang.org/x/text/unicode/runenames/runenames_test.go generated vendored Normal file
View file

@@ -0,0 +1,46 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runenames
import (
"strings"
"testing"
"unicode"
"golang.org/x/text/internal/gen"
"golang.org/x/text/internal/testtext"
"golang.org/x/text/internal/ucd"
)
func TestName(t *testing.T) {
testtext.SkipIfNotLong(t)
wants := make([]string, 1+unicode.MaxRune)
ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
r, s := p.Rune(0), p.String(ucd.Name)
if s == "" {
return
}
if s[0] == '<' {
const first = ", First>"
if i := strings.Index(s, first); i >= 0 {
s = s[:i] + ">"
}
}
wants[r] = s
})
nErrors := 0
for r, want := range wants {
got := Name(rune(r))
if got != want {
t.Errorf("r=%#08x: got %q, want %q", r, got, want)
nErrors++
if nErrors == 100 {
t.Fatal("too many errors")
}
}
}
}

15514
vendor/golang.org/x/text/unicode/runenames/tables.go generated vendored Normal file

File diff suppressed because it is too large