mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 18:43:34 +00:00
vendor files
This commit is contained in:
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
Normal file
16
vendor/golang.org/x/text/language/Makefile
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
# Copyright 2013 The Go Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
CLEANFILES+=maketables
|
||||
|
||||
maketables: maketables.go
|
||||
go build $^
|
||||
|
||||
tables: maketables
|
||||
./maketables > tables.go
|
||||
gofmt -w -s tables.go
|
||||
|
||||
# Build (but do not run) maketables during testing,
|
||||
# just to make sure it still compiles.
|
||||
testshort: maketables
|
16
vendor/golang.org/x/text/language/common.go
generated
vendored
Normal file
16
vendor/golang.org/x/text/language/common.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package language
|
||||
|
||||
// This file contains code common to the maketables.go and the package code.
|
||||
|
||||
// langAliasType is the type of an alias in langAliasMap.
|
||||
type langAliasType int8
|
||||
|
||||
const (
|
||||
langDeprecated langAliasType = iota
|
||||
langMacro
|
||||
langLegacy
|
||||
|
||||
langAliasTypeUnknown langAliasType = -1
|
||||
)
|
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
Normal file
197
vendor/golang.org/x/text/language/coverage.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// The Coverage interface is used to define the level of coverage of an
|
||||
// internationalization service. Note that not all types are supported by all
|
||||
// services. As lists may be generated on the fly, it is recommended that users
|
||||
// of a Coverage cache the results.
|
||||
type Coverage interface {
|
||||
// Tags returns the list of supported tags.
|
||||
Tags() []Tag
|
||||
|
||||
// BaseLanguages returns the list of supported base languages.
|
||||
BaseLanguages() []Base
|
||||
|
||||
// Scripts returns the list of supported scripts.
|
||||
Scripts() []Script
|
||||
|
||||
// Regions returns the list of supported regions.
|
||||
Regions() []Region
|
||||
}
|
||||
|
||||
var (
|
||||
// Supported defines a Coverage that lists all supported subtags. Tags
|
||||
// always returns nil.
|
||||
Supported Coverage = allSubtags{}
|
||||
)
|
||||
|
||||
// TODO:
|
||||
// - Support Variants, numbering systems.
|
||||
// - CLDR coverage levels.
|
||||
// - Set of common tags defined in this package.
|
||||
|
||||
type allSubtags struct{}
|
||||
|
||||
// Regions returns the list of supported regions. As all regions are in a
|
||||
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||
// The "undefined" region is not returned.
|
||||
func (s allSubtags) Regions() []Region {
|
||||
reg := make([]Region, numRegions)
|
||||
for i := range reg {
|
||||
reg[i] = Region{regionID(i + 1)}
|
||||
}
|
||||
return reg
|
||||
}
|
||||
|
||||
// Scripts returns the list of supported scripts. As all scripts are in a
|
||||
// consecutive range, it simply returns a slice of numbers in increasing order.
|
||||
// The "undefined" script is not returned.
|
||||
func (s allSubtags) Scripts() []Script {
|
||||
scr := make([]Script, numScripts)
|
||||
for i := range scr {
|
||||
scr[i] = Script{scriptID(i + 1)}
|
||||
}
|
||||
return scr
|
||||
}
|
||||
|
||||
// BaseLanguages returns the list of all supported base languages. It generates
|
||||
// the list by traversing the internal structures.
|
||||
func (s allSubtags) BaseLanguages() []Base {
|
||||
base := make([]Base, 0, numLanguages)
|
||||
for i := 0; i < langNoIndexOffset; i++ {
|
||||
// We included "und" already for the value 0.
|
||||
if i != nonCanonicalUnd {
|
||||
base = append(base, Base{langID(i)})
|
||||
}
|
||||
}
|
||||
i := langNoIndexOffset
|
||||
for _, v := range langNoIndex {
|
||||
for k := 0; k < 8; k++ {
|
||||
if v&1 == 1 {
|
||||
base = append(base, Base{langID(i)})
|
||||
}
|
||||
v >>= 1
|
||||
i++
|
||||
}
|
||||
}
|
||||
return base
|
||||
}
|
||||
|
||||
// Tags always returns nil.
|
||||
func (s allSubtags) Tags() []Tag {
|
||||
return nil
|
||||
}
|
||||
|
||||
// coverage is used used by NewCoverage which is used as a convenient way for
|
||||
// creating Coverage implementations for partially defined data. Very often a
|
||||
// package will only need to define a subset of slices. coverage provides a
|
||||
// convenient way to do this. Moreover, packages using NewCoverage, instead of
|
||||
// their own implementation, will not break if later new slice types are added.
|
||||
type coverage struct {
|
||||
tags func() []Tag
|
||||
bases func() []Base
|
||||
scripts func() []Script
|
||||
regions func() []Region
|
||||
}
|
||||
|
||||
func (s *coverage) Tags() []Tag {
|
||||
if s.tags == nil {
|
||||
return nil
|
||||
}
|
||||
return s.tags()
|
||||
}
|
||||
|
||||
// bases implements sort.Interface and is used to sort base languages.
|
||||
type bases []Base
|
||||
|
||||
func (b bases) Len() int {
|
||||
return len(b)
|
||||
}
|
||||
|
||||
func (b bases) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b bases) Less(i, j int) bool {
|
||||
return b[i].langID < b[j].langID
|
||||
}
|
||||
|
||||
// BaseLanguages returns the result from calling s.bases if it is specified or
|
||||
// otherwise derives the set of supported base languages from tags.
|
||||
func (s *coverage) BaseLanguages() []Base {
|
||||
if s.bases == nil {
|
||||
tags := s.Tags()
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
a := make([]Base, len(tags))
|
||||
for i, t := range tags {
|
||||
a[i] = Base{langID(t.lang)}
|
||||
}
|
||||
sort.Sort(bases(a))
|
||||
k := 0
|
||||
for i := 1; i < len(a); i++ {
|
||||
if a[k] != a[i] {
|
||||
k++
|
||||
a[k] = a[i]
|
||||
}
|
||||
}
|
||||
return a[:k+1]
|
||||
}
|
||||
return s.bases()
|
||||
}
|
||||
|
||||
func (s *coverage) Scripts() []Script {
|
||||
if s.scripts == nil {
|
||||
return nil
|
||||
}
|
||||
return s.scripts()
|
||||
}
|
||||
|
||||
func (s *coverage) Regions() []Region {
|
||||
if s.regions == nil {
|
||||
return nil
|
||||
}
|
||||
return s.regions()
|
||||
}
|
||||
|
||||
// NewCoverage returns a Coverage for the given lists. It is typically used by
|
||||
// packages providing internationalization services to define their level of
|
||||
// coverage. A list may be of type []T or func() []T, where T is either Tag,
|
||||
// Base, Script or Region. The returned Coverage derives the value for Bases
|
||||
// from Tags if no func or slice for []Base is specified. For other unspecified
|
||||
// types the returned Coverage will return nil for the respective methods.
|
||||
func NewCoverage(list ...interface{}) Coverage {
|
||||
s := &coverage{}
|
||||
for _, x := range list {
|
||||
switch v := x.(type) {
|
||||
case func() []Base:
|
||||
s.bases = v
|
||||
case func() []Script:
|
||||
s.scripts = v
|
||||
case func() []Region:
|
||||
s.regions = v
|
||||
case func() []Tag:
|
||||
s.tags = v
|
||||
case []Base:
|
||||
s.bases = func() []Base { return v }
|
||||
case []Script:
|
||||
s.scripts = func() []Script { return v }
|
||||
case []Region:
|
||||
s.regions = func() []Region { return v }
|
||||
case []Tag:
|
||||
s.tags = func() []Tag { return v }
|
||||
default:
|
||||
panic(fmt.Sprintf("language: unsupported set type %T", v))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
154
vendor/golang.org/x/text/language/coverage_test.go
generated
vendored
Normal file
154
vendor/golang.org/x/text/language/coverage_test.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSupported(t *testing.T) {
|
||||
// To prove the results are correct for a type, we test that the number of
|
||||
// results is identical to the number of results on record, that all results
|
||||
// are distinct and that all results are valid.
|
||||
tests := map[string]int{
|
||||
"BaseLanguages": numLanguages,
|
||||
"Scripts": numScripts,
|
||||
"Regions": numRegions,
|
||||
"Tags": 0,
|
||||
}
|
||||
sup := reflect.ValueOf(Supported)
|
||||
for name, num := range tests {
|
||||
v := sup.MethodByName(name).Call(nil)[0]
|
||||
if n := v.Len(); n != num {
|
||||
t.Errorf("len(%s()) was %d; want %d", name, n, num)
|
||||
}
|
||||
dup := make(map[string]bool)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
// An invalid value will either cause a crash or result in a
|
||||
// duplicate when passed to Sprint.
|
||||
s := fmt.Sprint(x)
|
||||
if dup[s] {
|
||||
t.Errorf("%s: duplicate entry %q", name, s)
|
||||
}
|
||||
dup[s] = true
|
||||
}
|
||||
if len(dup) != v.Len() {
|
||||
t.Errorf("%s: # unique entries was %d; want %d", name, len(dup), v.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewCoverage(t *testing.T) {
|
||||
bases := []Base{Base{0}, Base{3}, Base{7}}
|
||||
scripts := []Script{Script{11}, Script{17}, Script{23}}
|
||||
regions := []Region{Region{101}, Region{103}, Region{107}}
|
||||
tags := []Tag{Make("pt"), Make("en"), Make("en-GB"), Make("en-US"), Make("pt-PT")}
|
||||
fbases := func() []Base { return bases }
|
||||
fscripts := func() []Script { return scripts }
|
||||
fregions := func() []Region { return regions }
|
||||
ftags := func() []Tag { return tags }
|
||||
|
||||
tests := []struct {
|
||||
desc string
|
||||
list []interface{}
|
||||
bases []Base
|
||||
scripts []Script
|
||||
regions []Region
|
||||
tags []Tag
|
||||
}{
|
||||
{
|
||||
desc: "empty",
|
||||
},
|
||||
{
|
||||
desc: "bases",
|
||||
list: []interface{}{bases},
|
||||
bases: bases,
|
||||
},
|
||||
{
|
||||
desc: "scripts",
|
||||
list: []interface{}{scripts},
|
||||
scripts: scripts,
|
||||
},
|
||||
{
|
||||
desc: "regions",
|
||||
list: []interface{}{regions},
|
||||
regions: regions,
|
||||
},
|
||||
{
|
||||
desc: "bases derives from tags",
|
||||
list: []interface{}{tags},
|
||||
bases: []Base{Base{_en}, Base{_pt}},
|
||||
tags: tags,
|
||||
},
|
||||
{
|
||||
desc: "tags and bases",
|
||||
list: []interface{}{tags, bases},
|
||||
bases: bases,
|
||||
tags: tags,
|
||||
},
|
||||
{
|
||||
desc: "fully specified",
|
||||
list: []interface{}{tags, bases, scripts, regions},
|
||||
bases: bases,
|
||||
scripts: scripts,
|
||||
regions: regions,
|
||||
tags: tags,
|
||||
},
|
||||
{
|
||||
desc: "bases func",
|
||||
list: []interface{}{fbases},
|
||||
bases: bases,
|
||||
},
|
||||
{
|
||||
desc: "scripts func",
|
||||
list: []interface{}{fscripts},
|
||||
scripts: scripts,
|
||||
},
|
||||
{
|
||||
desc: "regions func",
|
||||
list: []interface{}{fregions},
|
||||
regions: regions,
|
||||
},
|
||||
{
|
||||
desc: "tags func",
|
||||
list: []interface{}{ftags},
|
||||
bases: []Base{Base{_en}, Base{_pt}},
|
||||
tags: tags,
|
||||
},
|
||||
{
|
||||
desc: "tags and bases",
|
||||
list: []interface{}{ftags, fbases},
|
||||
bases: bases,
|
||||
tags: tags,
|
||||
},
|
||||
{
|
||||
desc: "fully specified",
|
||||
list: []interface{}{ftags, fbases, fscripts, fregions},
|
||||
bases: bases,
|
||||
scripts: scripts,
|
||||
regions: regions,
|
||||
tags: tags,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
l := NewCoverage(tt.list...)
|
||||
if a := l.BaseLanguages(); !reflect.DeepEqual(a, tt.bases) {
|
||||
t.Errorf("%d:%s: BaseLanguages was %v; want %v", i, tt.desc, a, tt.bases)
|
||||
}
|
||||
if a := l.Scripts(); !reflect.DeepEqual(a, tt.scripts) {
|
||||
t.Errorf("%d:%s: Scripts was %v; want %v", i, tt.desc, a, tt.scripts)
|
||||
}
|
||||
if a := l.Regions(); !reflect.DeepEqual(a, tt.regions) {
|
||||
t.Errorf("%d:%s: Regions was %v; want %v", i, tt.desc, a, tt.regions)
|
||||
}
|
||||
if a := l.Tags(); !reflect.DeepEqual(a, tt.tags) {
|
||||
t.Errorf("%d:%s: Tags was %v; want %v", i, tt.desc, a, tt.tags)
|
||||
}
|
||||
}
|
||||
}
|
92
vendor/golang.org/x/text/language/display/dict.go
generated
vendored
Normal file
92
vendor/golang.org/x/text/language/display/dict.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package display
|
||||
|
||||
// This file contains sets of data for specific languages. Users can use these
|
||||
// to create smaller collections of supported languages and reduce total table
|
||||
// size.
|
||||
|
||||
// The variable names defined here correspond to those in package language.
|
||||
|
||||
var (
|
||||
Afrikaans *Dictionary = &af // af
|
||||
Amharic *Dictionary = &am // am
|
||||
Arabic *Dictionary = &ar // ar
|
||||
ModernStandardArabic *Dictionary = Arabic // ar-001
|
||||
Azerbaijani *Dictionary = &az // az
|
||||
Bulgarian *Dictionary = &bg // bg
|
||||
Bengali *Dictionary = &bn // bn
|
||||
Catalan *Dictionary = &ca // ca
|
||||
Czech *Dictionary = &cs // cs
|
||||
Danish *Dictionary = &da // da
|
||||
German *Dictionary = &de // de
|
||||
Greek *Dictionary = &el // el
|
||||
English *Dictionary = &en // en
|
||||
AmericanEnglish *Dictionary = English // en-US
|
||||
BritishEnglish *Dictionary = English // en-GB
|
||||
Spanish *Dictionary = &es // es
|
||||
EuropeanSpanish *Dictionary = Spanish // es-ES
|
||||
LatinAmericanSpanish *Dictionary = Spanish // es-419
|
||||
Estonian *Dictionary = &et // et
|
||||
Persian *Dictionary = &fa // fa
|
||||
Finnish *Dictionary = &fi // fi
|
||||
Filipino *Dictionary = &fil // fil
|
||||
French *Dictionary = &fr // fr
|
||||
Gujarati *Dictionary = &gu // gu
|
||||
Hebrew *Dictionary = &he // he
|
||||
Hindi *Dictionary = &hi // hi
|
||||
Croatian *Dictionary = &hr // hr
|
||||
Hungarian *Dictionary = &hu // hu
|
||||
Armenian *Dictionary = &hy // hy
|
||||
Indonesian *Dictionary = &id // id
|
||||
Icelandic *Dictionary = &is // is
|
||||
Italian *Dictionary = &it // it
|
||||
Japanese *Dictionary = &ja // ja
|
||||
Georgian *Dictionary = &ka // ka
|
||||
Kazakh *Dictionary = &kk // kk
|
||||
Khmer *Dictionary = &km // km
|
||||
Kannada *Dictionary = &kn // kn
|
||||
Korean *Dictionary = &ko // ko
|
||||
Kirghiz *Dictionary = &ky // ky
|
||||
Lao *Dictionary = &lo // lo
|
||||
Lithuanian *Dictionary = < // lt
|
||||
Latvian *Dictionary = &lv // lv
|
||||
Macedonian *Dictionary = &mk // mk
|
||||
Malayalam *Dictionary = &ml // ml
|
||||
Mongolian *Dictionary = &mn // mn
|
||||
Marathi *Dictionary = &mr // mr
|
||||
Malay *Dictionary = &ms // ms
|
||||
Burmese *Dictionary = &my // my
|
||||
Nepali *Dictionary = &ne // ne
|
||||
Dutch *Dictionary = &nl // nl
|
||||
Norwegian *Dictionary = &no // no
|
||||
Punjabi *Dictionary = &pa // pa
|
||||
Polish *Dictionary = &pl // pl
|
||||
Portuguese *Dictionary = &pt // pt
|
||||
BrazilianPortuguese *Dictionary = Portuguese // pt-BR
|
||||
EuropeanPortuguese *Dictionary = &ptPT // pt-PT
|
||||
Romanian *Dictionary = &ro // ro
|
||||
Russian *Dictionary = &ru // ru
|
||||
Sinhala *Dictionary = &si // si
|
||||
Slovak *Dictionary = &sk // sk
|
||||
Slovenian *Dictionary = &sl // sl
|
||||
Albanian *Dictionary = &sq // sq
|
||||
Serbian *Dictionary = &sr // sr
|
||||
SerbianLatin *Dictionary = &srLatn // sr
|
||||
Swedish *Dictionary = &sv // sv
|
||||
Swahili *Dictionary = &sw // sw
|
||||
Tamil *Dictionary = &ta // ta
|
||||
Telugu *Dictionary = &te // te
|
||||
Thai *Dictionary = &th // th
|
||||
Turkish *Dictionary = &tr // tr
|
||||
Ukrainian *Dictionary = &uk // uk
|
||||
Urdu *Dictionary = &ur // ur
|
||||
Uzbek *Dictionary = &uz // uz
|
||||
Vietnamese *Dictionary = &vi // vi
|
||||
Chinese *Dictionary = &zh // zh
|
||||
SimplifiedChinese *Dictionary = Chinese // zh-Hans
|
||||
TraditionalChinese *Dictionary = &zhHant // zh-Hant
|
||||
Zulu *Dictionary = &zu // zu
|
||||
)
|
39
vendor/golang.org/x/text/language/display/dict_test.go
generated
vendored
Normal file
39
vendor/golang.org/x/text/language/display/dict_test.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package display
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/text/internal/testtext"
|
||||
)
|
||||
|
||||
func TestLinking(t *testing.T) {
|
||||
base := getSize(t, `display.Tags(language.English).Name(language.English)`)
|
||||
compact := getSize(t, `display.English.Languages().Name(language.English)`)
|
||||
|
||||
if d := base - compact; d < 1.5*1024*1024 {
|
||||
t.Errorf("size(base) - size(compact) = %d - %d = was %d; want > 1.5MB", base, compact, d)
|
||||
}
|
||||
}
|
||||
|
||||
func getSize(t *testing.T, main string) int {
|
||||
size, err := testtext.CodeSize(fmt.Sprintf(body, main))
|
||||
if err != nil {
|
||||
t.Skipf("skipping link size test; binary size could not be determined: %v", err)
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
const body = `package main
|
||||
import (
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/language/display"
|
||||
)
|
||||
func main() {
|
||||
%s
|
||||
}
|
||||
`
|
420
vendor/golang.org/x/text/language/display/display.go
generated
vendored
Normal file
420
vendor/golang.org/x/text/language/display/display.go
generated
vendored
Normal file
@ -0,0 +1,420 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run maketables.go -output tables.go
|
||||
|
||||
// Package display provides display names for languages, scripts and regions in
|
||||
// a requested language.
|
||||
//
|
||||
// The data is based on CLDR's localeDisplayNames. It includes the names of the
|
||||
// draft level "contributed" or "approved". The resulting tables are quite
|
||||
// large. The display package is designed so that users can reduce the linked-in
|
||||
// table sizes by cherry picking the languages one wishes to support. There is a
|
||||
// Dictionary defined for a selected set of common languages for this purpose.
|
||||
package display // import "golang.org/x/text/language/display"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/format"
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
/*
|
||||
TODO:
|
||||
All fairly low priority at the moment:
|
||||
- Include alternative and variants as an option (using func options).
|
||||
- Option for returning the empty string for undefined values.
|
||||
- Support variants, currencies, time zones, option names and other data
|
||||
provided in CLDR.
|
||||
- Do various optimizations:
|
||||
- Reduce size of offset tables.
|
||||
- Consider compressing infrequently used languages and decompress on demand.
|
||||
*/
|
||||
|
||||
// A Formatter formats a tag in the current language. It is used in conjunction
|
||||
// with the message package.
|
||||
type Formatter struct {
|
||||
lookup func(tag int, x interface{}) string
|
||||
x interface{}
|
||||
}
|
||||
|
||||
// Format implements "golang.org/x/text/internal/format".Formatter.
|
||||
func (f Formatter) Format(state format.State, verb rune) {
|
||||
// TODO: there are a lot of inefficiencies in this code. Fix it when we
|
||||
// language.Tag has embedded compact tags.
|
||||
t := state.Language()
|
||||
_, index, _ := matcher.Match(t)
|
||||
str := f.lookup(index, f.x)
|
||||
if str == "" {
|
||||
// TODO: use language-specific punctuation.
|
||||
// TODO: use codePattern instead of language?
|
||||
if unknown := f.lookup(index, language.Und); unknown != "" {
|
||||
fmt.Fprintf(state, "%v (%v)", unknown, f.x)
|
||||
} else {
|
||||
fmt.Fprintf(state, "[language: %v]", f.x)
|
||||
}
|
||||
} else {
|
||||
state.Write([]byte(str))
|
||||
}
|
||||
}
|
||||
|
||||
// Language returns a Formatter that renders the name for lang in the
|
||||
// the current language. x may be a language.Base or a language.Tag.
|
||||
// It renders lang in the default language if no translation for the current
|
||||
// language is supported.
|
||||
func Language(lang interface{}) Formatter {
|
||||
return Formatter{langFunc, lang}
|
||||
}
|
||||
|
||||
// Region returns a Formatter that renders the name for region in the current
|
||||
// language. region may be a language.Region or a language.Tag.
|
||||
// It renders region in the default language if no translation for the current
|
||||
// language is supported.
|
||||
func Region(region interface{}) Formatter {
|
||||
return Formatter{regionFunc, region}
|
||||
}
|
||||
|
||||
// Script returns a Formatter that renders the name for script in the current
|
||||
// language. script may be a language.Script or a language.Tag.
|
||||
// It renders script in the default language if no translation for the current
|
||||
// language is supported.
|
||||
func Script(script interface{}) Formatter {
|
||||
return Formatter{scriptFunc, script}
|
||||
}
|
||||
|
||||
// Script returns a Formatter that renders the name for tag in the current
|
||||
// language. tag may be a language.Tag.
|
||||
// It renders tag in the default language if no translation for the current
|
||||
// language is supported.
|
||||
func Tag(tag interface{}) Formatter {
|
||||
return Formatter{tagFunc, tag}
|
||||
}
|
||||
|
||||
// A Namer is used to get the name for a given value, such as a Tag, Language,
|
||||
// Script or Region.
|
||||
type Namer interface {
|
||||
// Name returns a display string for the given value. A Namer returns an
|
||||
// empty string for values it does not support. A Namer may support naming
|
||||
// an unspecified value. For example, when getting the name for a region for
|
||||
// a tag that does not have a defined Region, it may return the name for an
|
||||
// unknown region. It is up to the user to filter calls to Name for values
|
||||
// for which one does not want to have a name string.
|
||||
Name(x interface{}) string
|
||||
}
|
||||
|
||||
var (
|
||||
// Supported lists the languages for which names are defined.
|
||||
Supported language.Coverage
|
||||
|
||||
// The set of all possible values for which names are defined. Note that not
|
||||
// all Namer implementations will cover all the values of a given type.
|
||||
// A Namer will return the empty string for unsupported values.
|
||||
Values language.Coverage
|
||||
|
||||
matcher language.Matcher
|
||||
)
|
||||
|
||||
func init() {
|
||||
tags := make([]language.Tag, numSupported)
|
||||
s := supported
|
||||
for i := range tags {
|
||||
p := strings.IndexByte(s, '|')
|
||||
tags[i] = language.Raw.Make(s[:p])
|
||||
s = s[p+1:]
|
||||
}
|
||||
matcher = language.NewMatcher(tags)
|
||||
Supported = language.NewCoverage(tags)
|
||||
|
||||
Values = language.NewCoverage(langTagSet.Tags, supportedScripts, supportedRegions)
|
||||
}
|
||||
|
||||
// Languages returns a Namer for naming languages. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either language.Base
|
||||
// or language.Tag. Note that the result may differ between passing a tag or its
|
||||
// base language. For example, for English, passing "nl-BE" would return Flemish
|
||||
// whereas passing "nl" returns "Dutch".
|
||||
func Languages(t language.Tag) Namer {
|
||||
if _, index, conf := matcher.Match(t); conf != language.No {
|
||||
return languageNamer(index)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type languageNamer int
|
||||
|
||||
func langFunc(i int, x interface{}) string {
|
||||
return nameLanguage(languageNamer(i), x)
|
||||
}
|
||||
|
||||
func (n languageNamer) name(i int) string {
|
||||
return lookup(langHeaders[:], int(n), i)
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for language names.
|
||||
func (n languageNamer) Name(x interface{}) string {
|
||||
return nameLanguage(n, x)
|
||||
}
|
||||
|
||||
// nonEmptyIndex walks up the parent chain until a non-empty header is found.
|
||||
// It returns -1 if no index could be found.
|
||||
func nonEmptyIndex(h []header, index int) int {
|
||||
for ; index != -1 && h[index].data == ""; index = int(parents[index]) {
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
// Scripts returns a Namer for naming scripts. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either a
|
||||
// language.Script or a language.Tag. It will not attempt to infer a script for
|
||||
// tags with an unspecified script.
|
||||
func Scripts(t language.Tag) Namer {
|
||||
if _, index, conf := matcher.Match(t); conf != language.No {
|
||||
if index = nonEmptyIndex(scriptHeaders[:], index); index != -1 {
|
||||
return scriptNamer(index)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type scriptNamer int
|
||||
|
||||
func scriptFunc(i int, x interface{}) string {
|
||||
return nameScript(scriptNamer(i), x)
|
||||
}
|
||||
|
||||
func (n scriptNamer) name(i int) string {
|
||||
return lookup(scriptHeaders[:], int(n), i)
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for script names.
|
||||
func (n scriptNamer) Name(x interface{}) string {
|
||||
return nameScript(n, x)
|
||||
}
|
||||
|
||||
// Regions returns a Namer for naming regions. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either a
|
||||
// language.Region or a language.Tag. It will not attempt to infer a region for
|
||||
// tags with an unspecified region.
|
||||
func Regions(t language.Tag) Namer {
|
||||
if _, index, conf := matcher.Match(t); conf != language.No {
|
||||
if index = nonEmptyIndex(regionHeaders[:], index); index != -1 {
|
||||
return regionNamer(index)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type regionNamer int
|
||||
|
||||
func regionFunc(i int, x interface{}) string {
|
||||
return nameRegion(regionNamer(i), x)
|
||||
}
|
||||
|
||||
func (n regionNamer) name(i int) string {
|
||||
return lookup(regionHeaders[:], int(n), i)
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for region names.
|
||||
func (n regionNamer) Name(x interface{}) string {
|
||||
return nameRegion(n, x)
|
||||
}
|
||||
|
||||
// Tags returns a Namer for giving a full description of a tag. The names of
|
||||
// scripts and regions that are not already implied by the language name will
|
||||
// in appended within parentheses. It returns nil if there is not data for the
|
||||
// given tag. The type passed to Name must be a tag.
|
||||
func Tags(t language.Tag) Namer {
|
||||
if _, index, conf := matcher.Match(t); conf != language.No {
|
||||
return tagNamer(index)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type tagNamer int
|
||||
|
||||
func tagFunc(i int, x interface{}) string {
|
||||
return nameTag(languageNamer(i), scriptNamer(i), regionNamer(i), x)
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for tag names.
|
||||
func (n tagNamer) Name(x interface{}) string {
|
||||
return nameTag(languageNamer(n), scriptNamer(n), regionNamer(n), x)
|
||||
}
|
||||
|
||||
// lookup finds the name for an entry in a global table, traversing the
|
||||
// inheritance hierarchy if needed.
|
||||
func lookup(table []header, dict, want int) string {
|
||||
for dict != -1 {
|
||||
if s := table[dict].name(want); s != "" {
|
||||
return s
|
||||
}
|
||||
dict = int(parents[dict])
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// A Dictionary holds a collection of Namers for a single language. One can
|
||||
// reduce the amount of data linked in to a binary by only referencing
|
||||
// Dictionaries for the languages one needs to support instead of using the
|
||||
// generic Namer factories.
|
||||
type Dictionary struct {
|
||||
parent *Dictionary
|
||||
lang header
|
||||
script header
|
||||
region header
|
||||
}
|
||||
|
||||
// Tags returns a Namer for giving a full description of a tag. The names of
|
||||
// scripts and regions that are not already implied by the language name will
|
||||
// in appended within parentheses. It returns nil if there is not data for the
|
||||
// given tag. The type passed to Name must be a tag.
|
||||
func (d *Dictionary) Tags() Namer {
|
||||
return dictTags{d}
|
||||
}
|
||||
|
||||
type dictTags struct {
|
||||
d *Dictionary
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for tag names.
|
||||
func (n dictTags) Name(x interface{}) string {
|
||||
return nameTag(dictLanguages{n.d}, dictScripts{n.d}, dictRegions{n.d}, x)
|
||||
}
|
||||
|
||||
// Languages returns a Namer for naming languages. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either language.Base
|
||||
// or language.Tag. Note that the result may differ between passing a tag or its
|
||||
// base language. For example, for English, passing "nl-BE" would return Flemish
|
||||
// whereas passing "nl" returns "Dutch".
|
||||
func (d *Dictionary) Languages() Namer {
|
||||
return dictLanguages{d}
|
||||
}
|
||||
|
||||
type dictLanguages struct {
|
||||
d *Dictionary
|
||||
}
|
||||
|
||||
func (n dictLanguages) name(i int) string {
|
||||
for d := n.d; d != nil; d = d.parent {
|
||||
if s := d.lang.name(i); s != "" {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for language names.
|
||||
func (n dictLanguages) Name(x interface{}) string {
|
||||
return nameLanguage(n, x)
|
||||
}
|
||||
|
||||
// Scripts returns a Namer for naming scripts. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either a
|
||||
// language.Script or a language.Tag. It will not attempt to infer a script for
|
||||
// tags with an unspecified script.
|
||||
func (d *Dictionary) Scripts() Namer {
|
||||
return dictScripts{d}
|
||||
}
|
||||
|
||||
type dictScripts struct {
|
||||
d *Dictionary
|
||||
}
|
||||
|
||||
func (n dictScripts) name(i int) string {
|
||||
for d := n.d; d != nil; d = d.parent {
|
||||
if s := d.script.name(i); s != "" {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for script names.
|
||||
func (n dictScripts) Name(x interface{}) string {
|
||||
return nameScript(n, x)
|
||||
}
|
||||
|
||||
// Regions returns a Namer for naming regions. It returns nil if there is no
|
||||
// data for the given tag. The type passed to Name must be either a
|
||||
// language.Region or a language.Tag. It will not attempt to infer a region for
|
||||
// tags with an unspecified region.
|
||||
func (d *Dictionary) Regions() Namer {
|
||||
return dictRegions{d}
|
||||
}
|
||||
|
||||
type dictRegions struct {
|
||||
d *Dictionary
|
||||
}
|
||||
|
||||
func (n dictRegions) name(i int) string {
|
||||
for d := n.d; d != nil; d = d.parent {
|
||||
if s := d.region.name(i); s != "" {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Name implements the Namer interface for region names.
|
||||
func (n dictRegions) Name(x interface{}) string {
|
||||
return nameRegion(n, x)
|
||||
}
|
||||
|
||||
// A SelfNamer implements a Namer that returns the name of language in this same
|
||||
// language. It provides a very compact mechanism to provide a comprehensive
|
||||
// list of languages to users in their native language.
|
||||
type SelfNamer struct {
|
||||
// Supported defines the values supported by this Namer.
|
||||
Supported language.Coverage
|
||||
}
|
||||
|
||||
var (
|
||||
// Self is a shared instance of a SelfNamer.
|
||||
Self *SelfNamer = &self
|
||||
|
||||
self = SelfNamer{language.NewCoverage(selfTagSet.Tags)}
|
||||
)
|
||||
|
||||
// Name returns the name of a given language tag in the language identified by
|
||||
// this tag. It supports both the language.Base and language.Tag types.
|
||||
func (n SelfNamer) Name(x interface{}) string {
|
||||
t, _ := language.All.Compose(x)
|
||||
base, scr, reg := t.Raw()
|
||||
baseScript := language.Script{}
|
||||
if (scr == language.Script{} && reg != language.Region{}) {
|
||||
// For looking up in the self dictionary, we need to select the
|
||||
// maximized script. This is even the case if the script isn't
|
||||
// specified.
|
||||
s1, _ := t.Script()
|
||||
if baseScript = getScript(base); baseScript != s1 {
|
||||
scr = s1
|
||||
}
|
||||
}
|
||||
|
||||
i, scr, reg := selfTagSet.index(base, scr, reg)
|
||||
if i == -1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Only return the display name if the script matches the expected script.
|
||||
if (scr != language.Script{}) {
|
||||
if (baseScript == language.Script{}) {
|
||||
baseScript = getScript(base)
|
||||
}
|
||||
if baseScript != scr {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
return selfHeaders[0].name(i)
|
||||
}
|
||||
|
||||
// getScript returns the maximized script for a base language.
|
||||
func getScript(b language.Base) language.Script {
|
||||
tag, _ := language.Raw.Compose(b)
|
||||
scr, _ := tag.Script()
|
||||
return scr
|
||||
}
|
714
vendor/golang.org/x/text/language/display/display_test.go
generated
vendored
Normal file
714
vendor/golang.org/x/text/language/display/display_test.go
generated
vendored
Normal file
@ -0,0 +1,714 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package display
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/testtext"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/message"
|
||||
)
|
||||
|
||||
// TODO: test that tables are properly dropped by the linker for various use
|
||||
// cases.
|
||||
|
||||
var (
|
||||
firstLang2aa = language.MustParseBase("aa")
|
||||
lastLang2zu = language.MustParseBase("zu")
|
||||
firstLang3ace = language.MustParseBase("ace")
|
||||
lastLang3zza = language.MustParseBase("zza")
|
||||
firstTagAr001 = language.MustParse("ar-001")
|
||||
lastTagZhHant = language.MustParse("zh-Hant")
|
||||
)
|
||||
|
||||
// TestValues tests that for all languages, regions, and scripts in Values, at
|
||||
// least one language has a name defined for it by checking it exists in
|
||||
// English, which is assumed to be the most comprehensive. It is also tested
|
||||
// that a Namer returns "" for unsupported values.
|
||||
func TestValues(t *testing.T) {
|
||||
type testcase struct {
|
||||
kind string
|
||||
n Namer
|
||||
}
|
||||
// checkDefined checks that a value exists in a Namer.
|
||||
checkDefined := func(x interface{}, namers []testcase) {
|
||||
for _, n := range namers {
|
||||
testtext.Run(t, fmt.Sprintf("%s.Name(%s)", n.kind, x), func(t *testing.T) {
|
||||
if n.n.Name(x) == "" {
|
||||
// As of version 28 there is no data for az-Arab in English,
|
||||
// although there is useful data in other languages.
|
||||
if x.(fmt.Stringer).String() == "az-Arab" {
|
||||
return
|
||||
}
|
||||
t.Errorf("supported but no result")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// checkUnsupported checks that a value does not exist in a Namer.
|
||||
checkUnsupported := func(x interface{}, namers []testcase) {
|
||||
for _, n := range namers {
|
||||
if got := n.n.Name(x); got != "" {
|
||||
t.Fatalf("%s.Name(%s): unsupported tag gave non-empty result: %q", n.kind, x, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags := map[language.Tag]bool{}
|
||||
namers := []testcase{
|
||||
{"Languages(en)", Languages(language.English)},
|
||||
{"Tags(en)", Tags(language.English)},
|
||||
{"English.Languages()", English.Languages()},
|
||||
{"English.Tags()", English.Tags()},
|
||||
}
|
||||
for _, tag := range Values.Tags() {
|
||||
checkDefined(tag, namers)
|
||||
tags[tag] = true
|
||||
}
|
||||
for _, base := range language.Supported.BaseLanguages() {
|
||||
tag, _ := language.All.Compose(base)
|
||||
if !tags[tag] {
|
||||
checkUnsupported(tag, namers)
|
||||
}
|
||||
}
|
||||
|
||||
regions := map[language.Region]bool{}
|
||||
namers = []testcase{
|
||||
{"Regions(en)", Regions(language.English)},
|
||||
{"English.Regions()", English.Regions()},
|
||||
}
|
||||
for _, r := range Values.Regions() {
|
||||
checkDefined(r, namers)
|
||||
regions[r] = true
|
||||
}
|
||||
for _, r := range language.Supported.Regions() {
|
||||
if r = r.Canonicalize(); !regions[r] {
|
||||
checkUnsupported(r, namers)
|
||||
}
|
||||
}
|
||||
|
||||
scripts := map[language.Script]bool{}
|
||||
namers = []testcase{
|
||||
{"Scripts(en)", Scripts(language.English)},
|
||||
{"English.Scripts()", English.Scripts()},
|
||||
}
|
||||
for _, s := range Values.Scripts() {
|
||||
checkDefined(s, namers)
|
||||
scripts[s] = true
|
||||
}
|
||||
for _, s := range language.Supported.Scripts() {
|
||||
// Canonicalize the script.
|
||||
tag, _ := language.DeprecatedScript.Compose(s)
|
||||
if _, s, _ = tag.Raw(); !scripts[s] {
|
||||
checkUnsupported(s, namers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestSupported tests that we have at least some Namers for languages that we
|
||||
// claim to support. To test the claims in the documentation, it also verifies
|
||||
// that if a Namer is returned, it will have at least some data.
|
||||
func TestSupported(t *testing.T) {
|
||||
supportedTags := Supported.Tags()
|
||||
if len(supportedTags) != numSupported {
|
||||
t.Errorf("number of supported was %d; want %d", len(supportedTags), numSupported)
|
||||
}
|
||||
|
||||
namerFuncs := []struct {
|
||||
kind string
|
||||
fn func(language.Tag) Namer
|
||||
}{
|
||||
{"Tags", Tags},
|
||||
{"Languages", Languages},
|
||||
{"Regions", Regions},
|
||||
{"Scripts", Scripts},
|
||||
}
|
||||
|
||||
// Verify that we have at least one Namer for all tags we claim to support.
|
||||
tags := make(map[language.Tag]bool)
|
||||
for _, tag := range supportedTags {
|
||||
// Test we have at least one Namer for this supported Tag.
|
||||
found := false
|
||||
for _, kind := range namerFuncs {
|
||||
if defined(t, kind.kind, kind.fn(tag), tag) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("%s: supported, but no data available", tag)
|
||||
}
|
||||
if tags[tag] {
|
||||
t.Errorf("%s: included in Supported.Tags more than once", tag)
|
||||
}
|
||||
tags[tag] = true
|
||||
}
|
||||
|
||||
// Verify that we have no Namers for tags we don't claim to support.
|
||||
for _, base := range language.Supported.BaseLanguages() {
|
||||
tag, _ := language.All.Compose(base)
|
||||
// Skip tags that are supported after matching.
|
||||
if _, _, conf := matcher.Match(tag); conf != language.No {
|
||||
continue
|
||||
}
|
||||
// Test there are no Namers for this tag.
|
||||
for _, kind := range namerFuncs {
|
||||
if defined(t, kind.kind, kind.fn(tag), tag) {
|
||||
t.Errorf("%[1]s(%[2]s) returns a Namer, but %[2]s is not in the set of supported Tags.", kind.kind, tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// defined reports whether n is a proper Namer, which means it is non-nil and
|
||||
// must have at least one non-empty value.
|
||||
func defined(t *testing.T, kind string, n Namer, tag language.Tag) bool {
|
||||
if n == nil {
|
||||
return false
|
||||
}
|
||||
switch kind {
|
||||
case "Tags":
|
||||
for _, t := range Values.Tags() {
|
||||
if n.Name(t) != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case "Languages":
|
||||
for _, t := range Values.BaseLanguages() {
|
||||
if n.Name(t) != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case "Regions":
|
||||
for _, t := range Values.Regions() {
|
||||
if n.Name(t) != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case "Scripts":
|
||||
for _, t := range Values.Scripts() {
|
||||
if n.Name(t) != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
t.Errorf("%s(%s) returns non-nil Namer without content", kind, tag)
|
||||
return false
|
||||
}
|
||||
|
||||
func TestCoverage(t *testing.T) {
|
||||
en := language.English
|
||||
tests := []struct {
|
||||
n Namer
|
||||
x interface{}
|
||||
}{
|
||||
{Languages(en), Values.Tags()},
|
||||
{Scripts(en), Values.Scripts()},
|
||||
{Regions(en), Values.Regions()},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
uniq := make(map[string]interface{})
|
||||
|
||||
v := reflect.ValueOf(tt.x)
|
||||
for j := 0; j < v.Len(); j++ {
|
||||
x := v.Index(j).Interface()
|
||||
// As of version 28 there is no data for az-Arab in English,
|
||||
// although there is useful data in other languages.
|
||||
if x.(fmt.Stringer).String() == "az-Arab" {
|
||||
continue
|
||||
}
|
||||
s := tt.n.Name(x)
|
||||
if s == "" {
|
||||
t.Errorf("%d:%d:%s: missing content", i, j, x)
|
||||
} else if uniq[s] != nil {
|
||||
t.Errorf("%d:%d:%s: identical return value %q for %v and %v", i, j, x, s, x, uniq[s])
|
||||
}
|
||||
uniq[s] = x
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdate tests whether dictionary entries for certain languages need to be
|
||||
// updated. For some languages, some of the headers may be empty or they may be
|
||||
// identical to the parent. This code detects if such entries need to be updated
|
||||
// after a table update.
|
||||
func TestUpdate(t *testing.T) {
|
||||
tests := []struct {
|
||||
d *Dictionary
|
||||
tag string
|
||||
}{
|
||||
{ModernStandardArabic, "ar-001"},
|
||||
{AmericanEnglish, "en-US"},
|
||||
{EuropeanSpanish, "es-ES"},
|
||||
{BrazilianPortuguese, "pt-BR"},
|
||||
{SimplifiedChinese, "zh-Hans"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
_, i, _ := matcher.Match(language.MustParse(tt.tag))
|
||||
if !reflect.DeepEqual(tt.d.lang, langHeaders[i]) {
|
||||
t.Errorf("%s: lang table update needed", tt.tag)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.d.script, scriptHeaders[i]) {
|
||||
t.Errorf("%s: script table update needed", tt.tag)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.d.region, regionHeaders[i]) {
|
||||
t.Errorf("%s: region table update needed", tt.tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndex(t *testing.T) {
|
||||
notIn := []string{"aa", "xx", "zz", "aaa", "xxx", "zzz", "Aaaa", "Xxxx", "Zzzz"}
|
||||
tests := []tagIndex{
|
||||
{
|
||||
"",
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"bb",
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"",
|
||||
"bbb",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"",
|
||||
"",
|
||||
"Bbbb",
|
||||
},
|
||||
{
|
||||
"bb",
|
||||
"bbb",
|
||||
"Bbbb",
|
||||
},
|
||||
{
|
||||
"bbccddyy",
|
||||
"bbbcccdddyyy",
|
||||
"BbbbCcccDdddYyyy",
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
// Create the test set from the tagIndex.
|
||||
cnt := 0
|
||||
for sz := 2; sz <= 4; sz++ {
|
||||
a := tt[sz-2]
|
||||
for j := 0; j < len(a); j += sz {
|
||||
s := a[j : j+sz]
|
||||
if idx := tt.index(s); idx != cnt {
|
||||
t.Errorf("%d:%s: index was %d; want %d", i, s, idx, cnt)
|
||||
}
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
if n := tt.len(); n != cnt {
|
||||
t.Errorf("%d: len was %d; want %d", i, n, cnt)
|
||||
}
|
||||
for _, x := range notIn {
|
||||
if idx := tt.index(x); idx != -1 {
|
||||
t.Errorf("%d:%s: index was %d; want -1", i, x, idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTag(t *testing.T) {
|
||||
tests := []struct {
|
||||
dict string
|
||||
tag string
|
||||
name string
|
||||
}{
|
||||
// sr is in Value.Languages(), but is not supported by agq.
|
||||
{"agq", "sr", "|[language: sr]"},
|
||||
{"nl", "nl", "Nederlands"},
|
||||
// CLDR 30 dropped Vlaams as the word for nl-BE. It is still called
|
||||
// Flemish in English, though. TODO: check if this is a CLDR bug.
|
||||
// {"nl", "nl-BE", "Vlaams"},
|
||||
{"nl", "nl-BE", "Nederlands (België)"},
|
||||
{"nl", "vls", "West-Vlaams"},
|
||||
{"en", "nl-BE", "Flemish"},
|
||||
{"en", "en", "English"},
|
||||
{"en", "en-GB", "British English"},
|
||||
{"en", "en-US", "American English"}, // American English in CLDR 24+
|
||||
{"ru", "ru", "русский"},
|
||||
{"ru", "ru-RU", "русский (Россия)"},
|
||||
{"ru", "ru-Cyrl", "русский (кириллица)"},
|
||||
{"en", lastLang2zu.String(), "Zulu"},
|
||||
{"en", firstLang2aa.String(), "Afar"},
|
||||
{"en", lastLang3zza.String(), "Zaza"},
|
||||
{"en", firstLang3ace.String(), "Achinese"},
|
||||
{"en", firstTagAr001.String(), "Modern Standard Arabic"},
|
||||
{"en", lastTagZhHant.String(), "Traditional Chinese"},
|
||||
{"en", "aaa", "|Unknown language (aaa)"},
|
||||
{"en", "zzj", "|Unknown language (zzj)"},
|
||||
// If full tag doesn't match, try without script or region.
|
||||
{"en", "aa-Hans", "Afar (Simplified Han)"},
|
||||
{"en", "af-Arab", "Afrikaans (Arabic)"},
|
||||
{"en", "zu-Cyrl", "Zulu (Cyrillic)"},
|
||||
{"en", "aa-GB", "Afar (United Kingdom)"},
|
||||
{"en", "af-NA", "Afrikaans (Namibia)"},
|
||||
{"en", "zu-BR", "Zulu (Brazil)"},
|
||||
// Correct inheritance and language selection.
|
||||
{"zh", "zh-TW", "中文 (台湾)"},
|
||||
{"zh", "zh-Hant-TW", "繁体中文 (台湾)"},
|
||||
{"zh-Hant", "zh-TW", "中文 (台灣)"},
|
||||
{"zh-Hant", "zh-Hant-TW", "繁體中文 (台灣)"},
|
||||
// Some rather arbitrary interpretations for Serbian. This is arguably
|
||||
// correct and consistent with the way zh-[Hant-]TW is handled. It will
|
||||
// also give results more in line with the expectations if users
|
||||
// explicitly use "sh".
|
||||
{"sr-Latn", "sr-ME", "srpski (Crna Gora)"},
|
||||
{"sr-Latn", "sr-Latn-ME", "srpskohrvatski (Crna Gora)"},
|
||||
// Double script and region
|
||||
{"nl", "en-Cyrl-BE", "Engels (Cyrillisch, België)"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.dict+"/"+tt.tag, func(t *testing.T) {
|
||||
name, fmtName := splitName(tt.name)
|
||||
dict := language.MustParse(tt.dict)
|
||||
tag := language.Raw.MustParse(tt.tag)
|
||||
d := Tags(dict)
|
||||
if n := d.Name(tag); n != name {
|
||||
// There are inconsistencies w.r.t. capitalization in the tests
|
||||
// due to CLDR's update procedure which treats modern and other
|
||||
// languages differently.
|
||||
// See http://unicode.org/cldr/trac/ticket/8051.
|
||||
// TODO: use language capitalization to sanitize the strings.
|
||||
t.Errorf("Name(%s) = %q; want %q", tag, n, name)
|
||||
}
|
||||
|
||||
p := message.NewPrinter(dict)
|
||||
if n := p.Sprint(Tag(tag)); n != fmtName {
|
||||
t.Errorf("Tag(%s) = %q; want %q", tag, n, fmtName)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func splitName(names string) (name, formatName string) {
|
||||
split := strings.Split(names, "|")
|
||||
name, formatName = split[0], split[0]
|
||||
if len(split) > 1 {
|
||||
formatName = split[1]
|
||||
}
|
||||
return name, formatName
|
||||
}
|
||||
|
||||
func TestLanguage(t *testing.T) {
|
||||
tests := []struct {
|
||||
dict string
|
||||
tag string
|
||||
name string
|
||||
}{
|
||||
// sr is in Value.Languages(), but is not supported by agq.
|
||||
{"agq", "sr", "|[language: sr]"},
|
||||
// CLDR 30 dropped Vlaams as the word for nl-BE. It is still called
|
||||
// Flemish in English, though. TODO: this is probably incorrect.
|
||||
// West-Vlaams (vls) is not Vlaams. West-Vlaams could be considered its
|
||||
// own language, whereas Vlaams is generally Dutch. So expect to have
|
||||
// to change these tests back.
|
||||
{"nl", "nl", "Nederlands"},
|
||||
{"nl", "vls", "West-Vlaams"},
|
||||
{"nl", "nl-BE", "Nederlands"},
|
||||
{"en", "pt", "Portuguese"},
|
||||
{"en", "pt-PT", "European Portuguese"},
|
||||
{"en", "pt-BR", "Brazilian Portuguese"},
|
||||
{"en", "en", "English"},
|
||||
{"en", "en-GB", "British English"},
|
||||
{"en", "en-US", "American English"}, // American English in CLDR 24+
|
||||
{"en", lastLang2zu.String(), "Zulu"},
|
||||
{"en", firstLang2aa.String(), "Afar"},
|
||||
{"en", lastLang3zza.String(), "Zaza"},
|
||||
{"en", firstLang3ace.String(), "Achinese"},
|
||||
{"en", firstTagAr001.String(), "Modern Standard Arabic"},
|
||||
{"en", lastTagZhHant.String(), "Traditional Chinese"},
|
||||
{"en", "aaa", "|Unknown language (aaa)"},
|
||||
{"en", "zzj", "|Unknown language (zzj)"},
|
||||
// If full tag doesn't match, try without script or region.
|
||||
{"en", "aa-Hans", "Afar"},
|
||||
{"en", "af-Arab", "Afrikaans"},
|
||||
{"en", "zu-Cyrl", "Zulu"},
|
||||
{"en", "aa-GB", "Afar"},
|
||||
{"en", "af-NA", "Afrikaans"},
|
||||
{"en", "zu-BR", "Zulu"},
|
||||
{"agq", "zh-Hant", "|[language: zh-Hant]"},
|
||||
{"en", "sh", "Serbo-Croatian"},
|
||||
{"en", "sr-Latn", "Serbo-Croatian"},
|
||||
{"en", "sr", "Serbian"},
|
||||
{"en", "sr-ME", "Serbian"},
|
||||
{"en", "sr-Latn-ME", "Serbo-Croatian"}, // See comments in TestTag.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
testtext.Run(t, tt.dict+"/"+tt.tag, func(t *testing.T) {
|
||||
name, fmtName := splitName(tt.name)
|
||||
dict := language.MustParse(tt.dict)
|
||||
tag := language.Raw.MustParse(tt.tag)
|
||||
p := message.NewPrinter(dict)
|
||||
d := Languages(dict)
|
||||
if n := d.Name(tag); n != name {
|
||||
t.Errorf("Name(%v) = %q; want %q", tag, n, name)
|
||||
}
|
||||
if n := p.Sprint(Language(tag)); n != fmtName {
|
||||
t.Errorf("Language(%v) = %q; want %q", tag, n, fmtName)
|
||||
}
|
||||
if len(tt.tag) <= 3 {
|
||||
base := language.MustParseBase(tt.tag)
|
||||
if n := d.Name(base); n != name {
|
||||
t.Errorf("Name(%v) = %q; want %q", base, n, name)
|
||||
}
|
||||
if n := p.Sprint(Language(base)); n != fmtName {
|
||||
t.Errorf("Language(%v) = %q; want %q", base, n, fmtName)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScript(t *testing.T) {
|
||||
tests := []struct {
|
||||
dict string
|
||||
scr string
|
||||
name string
|
||||
}{
|
||||
{"nl", "Arab", "Arabisch"},
|
||||
{"en", "Arab", "Arabic"},
|
||||
{"en", "Zzzz", "Unknown Script"},
|
||||
{"zh-Hant", "Hang", "韓文字"},
|
||||
{"zh-Hant-HK", "Hang", "韓文字"},
|
||||
{"zh", "Arab", "阿拉伯文"},
|
||||
{"zh-Hans-HK", "Arab", "阿拉伯文"}, // same as zh
|
||||
{"zh-Hant", "Arab", "阿拉伯文"},
|
||||
{"zh-Hant-HK", "Arab", "阿拉伯文"}, // same as zh
|
||||
// Canonicalized form
|
||||
{"en", "Qaai", "Inherited"}, // deprecated script, now is Zinh
|
||||
{"en", "sh", "Unknown Script"}, // sh canonicalizes to sr-Latn
|
||||
{"en", "en", "Unknown Script"},
|
||||
// Don't introduce scripts with canonicalization.
|
||||
{"en", "sh", "Unknown Script"}, // sh canonicalizes to sr-Latn
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.dict+"/"+tt.scr, func(t *testing.T) {
|
||||
name, fmtName := splitName(tt.name)
|
||||
dict := language.MustParse(tt.dict)
|
||||
p := message.NewPrinter(dict)
|
||||
d := Scripts(dict)
|
||||
var tag language.Tag
|
||||
if unicode.IsUpper(rune(tt.scr[0])) {
|
||||
x := language.MustParseScript(tt.scr)
|
||||
if n := d.Name(x); n != name {
|
||||
t.Errorf("Name(%v) = %q; want %q", x, n, name)
|
||||
}
|
||||
if n := p.Sprint(Script(x)); n != fmtName {
|
||||
t.Errorf("Script(%v) = %q; want %q", x, n, fmtName)
|
||||
}
|
||||
tag, _ = language.Raw.Compose(x)
|
||||
} else {
|
||||
tag = language.Raw.MustParse(tt.scr)
|
||||
}
|
||||
if n := d.Name(tag); n != name {
|
||||
t.Errorf("Name(%v) = %q; want %q", tag, n, name)
|
||||
}
|
||||
if n := p.Sprint(Script(tag)); n != fmtName {
|
||||
t.Errorf("Script(%v) = %q; want %q", tag, n, fmtName)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegion(t *testing.T) {
|
||||
tests := []struct {
|
||||
dict string
|
||||
reg string
|
||||
name string
|
||||
}{
|
||||
{"nl", "NL", "Nederland"},
|
||||
{"en", "US", "United States"},
|
||||
{"en", "ZZ", "Unknown Region"},
|
||||
{"en-GB", "NL", "Netherlands"},
|
||||
// Canonical equivalents
|
||||
{"en", "UK", "United Kingdom"},
|
||||
// No region
|
||||
{"en", "pt", "Unknown Region"},
|
||||
{"en", "und", "Unknown Region"},
|
||||
// Don't introduce regions with canonicalization.
|
||||
{"en", "mo", "Unknown Region"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.dict+"/"+tt.reg, func(t *testing.T) {
|
||||
dict := language.MustParse(tt.dict)
|
||||
p := message.NewPrinter(dict)
|
||||
d := Regions(dict)
|
||||
var tag language.Tag
|
||||
if unicode.IsUpper(rune(tt.reg[0])) {
|
||||
// Region
|
||||
x := language.MustParseRegion(tt.reg)
|
||||
if n := d.Name(x); n != tt.name {
|
||||
t.Errorf("Name(%v) = %q; want %q", x, n, tt.name)
|
||||
}
|
||||
if n := p.Sprint(Region(x)); n != tt.name {
|
||||
t.Errorf("Region(%v) = %q; want %q", x, n, tt.name)
|
||||
}
|
||||
tag, _ = language.Raw.Compose(x)
|
||||
} else {
|
||||
tag = language.Raw.MustParse(tt.reg)
|
||||
}
|
||||
if n := d.Name(tag); n != tt.name {
|
||||
t.Errorf("Name(%v) = %q; want %q", tag, n, tt.name)
|
||||
}
|
||||
if n := p.Sprint(Region(tag)); n != tt.name {
|
||||
t.Errorf("Region(%v) = %q; want %q", tag, n, tt.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelf(t *testing.T) {
|
||||
tests := []struct {
|
||||
tag string
|
||||
name string
|
||||
}{
|
||||
{"nl", "Nederlands"},
|
||||
// CLDR 30 dropped Vlaams as the word for nl-BE. It is still called
|
||||
// Flemish in English, though. TODO: check if this is a CLDR bug.
|
||||
// {"nl-BE", "Vlaams"},
|
||||
{"nl-BE", "Nederlands"},
|
||||
{"en-GB", "British English"},
|
||||
{lastLang2zu.String(), "isiZulu"},
|
||||
{firstLang2aa.String(), ""}, // not defined
|
||||
{lastLang3zza.String(), ""}, // not defined
|
||||
{firstLang3ace.String(), ""}, // not defined
|
||||
{firstTagAr001.String(), "العربية الرسمية الحديثة"},
|
||||
{"ar", "العربية"},
|
||||
{lastTagZhHant.String(), "繁體中文"},
|
||||
{"aaa", ""},
|
||||
{"zzj", ""},
|
||||
// Drop entries that are not in the requested script, even if there is
|
||||
// an entry for the language.
|
||||
{"aa-Hans", ""},
|
||||
{"af-Arab", ""},
|
||||
{"zu-Cyrl", ""},
|
||||
// Append the country name in the language of the matching language.
|
||||
{"af-NA", "Afrikaans"},
|
||||
{"zh", "中文"},
|
||||
// zh-TW should match zh-Hant instead of zh!
|
||||
{"zh-TW", "繁體中文"},
|
||||
{"zh-Hant", "繁體中文"},
|
||||
{"zh-Hans", "简体中文"},
|
||||
{"zh-Hant-TW", "繁體中文"},
|
||||
{"zh-Hans-TW", "简体中文"},
|
||||
// Take the entry for sr which has the matching script.
|
||||
// TODO: Capitalization changed as of CLDR 26, but change seems
|
||||
// arbitrary. Revisit capitalization with revision 27. See
|
||||
// http://unicode.org/cldr/trac/ticket/8051.
|
||||
{"sr", "српски"},
|
||||
// TODO: sr-ME should show up as Serbian or Montenegrin, not Serbo-
|
||||
// Croatian. This is an artifact of the current algorithm, which is the
|
||||
// way it is to have the preferred behavior for other languages such as
|
||||
// Chinese. We can hardwire this case in the table generator or package
|
||||
// code, but we first check if CLDR can be updated.
|
||||
// {"sr-ME", "Srpski"}, // Is Srpskohrvatski
|
||||
{"sr-Latn-ME", "srpskohrvatski"},
|
||||
{"sr-Cyrl-ME", "српски"},
|
||||
{"sr-NL", "српски"},
|
||||
// NOTE: kk is defined, but in Cyrillic script. For China, Arab is the
|
||||
// dominant script. We do not have data for kk-Arab and we chose to not
|
||||
// fall back in such cases.
|
||||
{"kk-CN", ""},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
d := Self
|
||||
if n := d.Name(language.Raw.MustParse(tt.tag)); n != tt.name {
|
||||
t.Errorf("%d:%s: was %q; want %q", i, tt.tag, n, tt.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEquivalence(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
namer Namer
|
||||
}{
|
||||
{"Self", Self},
|
||||
{"Tags", Tags(language.Romanian)},
|
||||
{"Languages", Languages(language.Romanian)},
|
||||
{"Scripts", Scripts(language.Romanian)},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ro := tc.namer.Name(language.Raw.MustParse("ro-MD"))
|
||||
mo := tc.namer.Name(language.Raw.MustParse("mo"))
|
||||
if ro != mo {
|
||||
t.Errorf("%q != %q", ro, mo)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDictionaryLang(t *testing.T) {
|
||||
tests := []struct {
|
||||
d *Dictionary
|
||||
tag string
|
||||
name string
|
||||
}{
|
||||
{English, "en", "English"},
|
||||
{Portuguese, "af", "africâner"},
|
||||
{EuropeanPortuguese, "af", "africanês"},
|
||||
{English, "nl-BE", "Flemish"},
|
||||
}
|
||||
for i, test := range tests {
|
||||
tag := language.MustParse(test.tag)
|
||||
if got := test.d.Tags().Name(tag); got != test.name {
|
||||
t.Errorf("%d:%v: got %s; want %s", i, tag, got, test.name)
|
||||
}
|
||||
if base, _ := language.Compose(tag.Base()); base == tag {
|
||||
if got := test.d.Languages().Name(base); got != test.name {
|
||||
t.Errorf("%d:%v: got %s; want %s", i, tag, got, test.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDictionaryRegion(t *testing.T) {
|
||||
tests := []struct {
|
||||
d *Dictionary
|
||||
region string
|
||||
name string
|
||||
}{
|
||||
{English, "FR", "France"},
|
||||
{Portuguese, "009", "Oceania"},
|
||||
{EuropeanPortuguese, "009", "Oceânia"},
|
||||
}
|
||||
for i, test := range tests {
|
||||
tag := language.MustParseRegion(test.region)
|
||||
if got := test.d.Regions().Name(tag); got != test.name {
|
||||
t.Errorf("%d:%v: got %s; want %s", i, tag, got, test.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDictionaryScript(t *testing.T) {
|
||||
tests := []struct {
|
||||
d *Dictionary
|
||||
script string
|
||||
name string
|
||||
}{
|
||||
{English, "Cyrl", "Cyrillic"},
|
||||
{EuropeanPortuguese, "Gujr", "guzerate"},
|
||||
}
|
||||
for i, test := range tests {
|
||||
tag := language.MustParseScript(test.script)
|
||||
if got := test.d.Scripts().Name(tag); got != test.name {
|
||||
t.Errorf("%d:%v: got %s; want %s", i, tag, got, test.name)
|
||||
}
|
||||
}
|
||||
}
|
116
vendor/golang.org/x/text/language/display/examples_test.go
generated
vendored
Normal file
116
vendor/golang.org/x/text/language/display/examples_test.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package display_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/language/display"
|
||||
"golang.org/x/text/message"
|
||||
)
|
||||
|
||||
func ExampleFormatter() {
|
||||
message.SetString(language.Dutch, "In %v people speak %v.", "In %v spreekt men %v.")
|
||||
|
||||
fr := language.French
|
||||
region, _ := fr.Region()
|
||||
for _, tag := range []string{"en", "nl"} {
|
||||
p := message.NewPrinter(language.Make(tag))
|
||||
|
||||
p.Printf("In %v people speak %v.", display.Region(region), display.Language(fr))
|
||||
p.Println()
|
||||
}
|
||||
|
||||
// Output:
|
||||
// In France people speak French.
|
||||
// In Frankrijk spreekt men Frans.
|
||||
}
|
||||
|
||||
func ExampleNamer() {
|
||||
supported := []string{
|
||||
"en-US", "en-GB", "ja", "zh", "zh-Hans", "zh-Hant", "pt", "pt-PT", "ko", "ar", "el", "ru", "uk", "pa",
|
||||
}
|
||||
|
||||
en := display.English.Languages()
|
||||
|
||||
for _, s := range supported {
|
||||
t := language.MustParse(s)
|
||||
fmt.Printf("%-20s (%s)\n", en.Name(t), display.Self.Name(t))
|
||||
}
|
||||
|
||||
// Output:
|
||||
// American English (American English)
|
||||
// British English (British English)
|
||||
// Japanese (日本語)
|
||||
// Chinese (中文)
|
||||
// Simplified Chinese (简体中文)
|
||||
// Traditional Chinese (繁體中文)
|
||||
// Portuguese (português)
|
||||
// European Portuguese (português europeu)
|
||||
// Korean (한국어)
|
||||
// Arabic (العربية)
|
||||
// Greek (Ελληνικά)
|
||||
// Russian (русский)
|
||||
// Ukrainian (українська)
|
||||
// Punjabi (ਪੰਜਾਬੀ)
|
||||
}
|
||||
|
||||
func ExampleTags() {
|
||||
n := display.Tags(language.English)
|
||||
fmt.Println(n.Name(language.Make("nl")))
|
||||
fmt.Println(n.Name(language.Make("nl-BE")))
|
||||
fmt.Println(n.Name(language.Make("nl-CW")))
|
||||
fmt.Println(n.Name(language.Make("nl-Arab")))
|
||||
fmt.Println(n.Name(language.Make("nl-Cyrl-RU")))
|
||||
|
||||
// Output:
|
||||
// Dutch
|
||||
// Flemish
|
||||
// Dutch (Curaçao)
|
||||
// Dutch (Arabic)
|
||||
// Dutch (Cyrillic, Russia)
|
||||
}
|
||||
|
||||
// ExampleDictionary shows how to reduce the amount of data linked into your
|
||||
// binary by only using the predefined Dictionary variables of the languages you
|
||||
// wish to support.
|
||||
func ExampleDictionary() {
|
||||
tags := []language.Tag{
|
||||
language.English,
|
||||
language.German,
|
||||
language.Japanese,
|
||||
language.Russian,
|
||||
}
|
||||
dicts := []*display.Dictionary{
|
||||
display.English,
|
||||
display.German,
|
||||
display.Japanese,
|
||||
display.Russian,
|
||||
}
|
||||
|
||||
m := language.NewMatcher(tags)
|
||||
|
||||
getDict := func(t language.Tag) *display.Dictionary {
|
||||
_, i, confidence := m.Match(t)
|
||||
// Skip this check if you want to support a fall-back language, which
|
||||
// will be the first one passed to NewMatcher.
|
||||
if confidence == language.No {
|
||||
return nil
|
||||
}
|
||||
return dicts[i]
|
||||
}
|
||||
|
||||
// The matcher will match Swiss German to German.
|
||||
n := getDict(language.Make("gsw")).Languages()
|
||||
fmt.Println(n.Name(language.German))
|
||||
fmt.Println(n.Name(language.Make("de-CH")))
|
||||
fmt.Println(n.Name(language.Make("gsw")))
|
||||
|
||||
// Output:
|
||||
// Deutsch
|
||||
// Schweizer Hochdeutsch
|
||||
// Schweizerdeutsch
|
||||
}
|
251
vendor/golang.org/x/text/language/display/lookup.go
generated
vendored
Normal file
251
vendor/golang.org/x/text/language/display/lookup.go
generated
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package display
|
||||
|
||||
// This file contains common lookup code that is shared between the various
|
||||
// implementations of Namer and Dictionaries.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
type namer interface {
|
||||
// name gets the string for the given index. It should walk the
|
||||
// inheritance chain if a value is not present in the base index.
|
||||
name(idx int) string
|
||||
}
|
||||
|
||||
func nameLanguage(n namer, x interface{}) string {
|
||||
t, _ := language.All.Compose(x)
|
||||
for {
|
||||
i, _, _ := langTagSet.index(t.Raw())
|
||||
if s := n.name(i); s != "" {
|
||||
return s
|
||||
}
|
||||
if t = t.Parent(); t == language.Und {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func nameScript(n namer, x interface{}) string {
|
||||
t, _ := language.DeprecatedScript.Compose(x)
|
||||
_, s, _ := t.Raw()
|
||||
return n.name(scriptIndex.index(s.String()))
|
||||
}
|
||||
|
||||
func nameRegion(n namer, x interface{}) string {
|
||||
t, _ := language.DeprecatedRegion.Compose(x)
|
||||
_, _, r := t.Raw()
|
||||
return n.name(regionIndex.index(r.String()))
|
||||
}
|
||||
|
||||
func nameTag(langN, scrN, regN namer, x interface{}) string {
|
||||
t, ok := x.(language.Tag)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
const form = language.All &^ language.SuppressScript
|
||||
if c, err := form.Canonicalize(t); err == nil {
|
||||
t = c
|
||||
}
|
||||
_, sRaw, rRaw := t.Raw()
|
||||
i, scr, reg := langTagSet.index(t.Raw())
|
||||
for i != -1 {
|
||||
if str := langN.name(i); str != "" {
|
||||
if hasS, hasR := (scr != language.Script{}), (reg != language.Region{}); hasS || hasR {
|
||||
ss, sr := "", ""
|
||||
if hasS {
|
||||
ss = scrN.name(scriptIndex.index(scr.String()))
|
||||
}
|
||||
if hasR {
|
||||
sr = regN.name(regionIndex.index(reg.String()))
|
||||
}
|
||||
// TODO: use patterns in CLDR or at least confirm they are the
|
||||
// same for all languages.
|
||||
if ss != "" && sr != "" {
|
||||
return fmt.Sprintf("%s (%s, %s)", str, ss, sr)
|
||||
}
|
||||
if ss != "" || sr != "" {
|
||||
return fmt.Sprintf("%s (%s%s)", str, ss, sr)
|
||||
}
|
||||
}
|
||||
return str
|
||||
}
|
||||
scr, reg = sRaw, rRaw
|
||||
if t = t.Parent(); t == language.Und {
|
||||
return ""
|
||||
}
|
||||
i, _, _ = langTagSet.index(t.Raw())
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// header contains the data and indexes for a single namer.
|
||||
// data contains a series of strings concatenated into one. index contains the
|
||||
// offsets for a string in data. For example, consider a header that defines
|
||||
// strings for the languages de, el, en, fi, and nl:
|
||||
//
|
||||
// header{
|
||||
// data: "GermanGreekEnglishDutch",
|
||||
// index: []uint16{ 0, 6, 11, 18, 18, 23 },
|
||||
// }
|
||||
//
|
||||
// For a language with index i, the string is defined by
|
||||
// data[index[i]:index[i+1]]. So the number of elements in index is always one
|
||||
// greater than the number of languages for which header defines a value.
|
||||
// A string for a language may be empty, which means the name is undefined. In
|
||||
// the above example, the name for fi (Finnish) is undefined.
|
||||
type header struct {
|
||||
data string
|
||||
index []uint16
|
||||
}
|
||||
|
||||
// name looks up the name for a tag in the dictionary, given its index.
|
||||
func (h *header) name(i int) string {
|
||||
if 0 <= i && i < len(h.index)-1 {
|
||||
return h.data[h.index[i]:h.index[i+1]]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// tagSet is used to find the index of a language in a set of tags.
|
||||
type tagSet struct {
|
||||
single tagIndex
|
||||
long []string
|
||||
}
|
||||
|
||||
var (
|
||||
langTagSet = tagSet{
|
||||
single: langIndex,
|
||||
long: langTagsLong,
|
||||
}
|
||||
|
||||
// selfTagSet is used for indexing the language strings in their own
|
||||
// language.
|
||||
selfTagSet = tagSet{
|
||||
single: selfIndex,
|
||||
long: selfTagsLong,
|
||||
}
|
||||
|
||||
zzzz = language.MustParseScript("Zzzz")
|
||||
zz = language.MustParseRegion("ZZ")
|
||||
)
|
||||
|
||||
// index returns the index of the tag for the given base, script and region or
|
||||
// its parent if the tag is not available. If the match is for a parent entry,
|
||||
// the excess script and region are returned.
|
||||
func (ts *tagSet) index(base language.Base, scr language.Script, reg language.Region) (int, language.Script, language.Region) {
|
||||
lang := base.String()
|
||||
index := -1
|
||||
if (scr != language.Script{} || reg != language.Region{}) {
|
||||
if scr == zzzz {
|
||||
scr = language.Script{}
|
||||
}
|
||||
if reg == zz {
|
||||
reg = language.Region{}
|
||||
}
|
||||
|
||||
i := sort.SearchStrings(ts.long, lang)
|
||||
// All entries have either a script or a region and not both.
|
||||
scrStr, regStr := scr.String(), reg.String()
|
||||
for ; i < len(ts.long) && strings.HasPrefix(ts.long[i], lang); i++ {
|
||||
if s := ts.long[i][len(lang)+1:]; s == scrStr {
|
||||
scr = language.Script{}
|
||||
index = i + ts.single.len()
|
||||
break
|
||||
} else if s == regStr {
|
||||
reg = language.Region{}
|
||||
index = i + ts.single.len()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
index = ts.single.index(lang)
|
||||
}
|
||||
return index, scr, reg
|
||||
}
|
||||
|
||||
func (ts *tagSet) Tags() []language.Tag {
|
||||
tags := make([]language.Tag, 0, ts.single.len()+len(ts.long))
|
||||
ts.single.keys(func(s string) {
|
||||
tags = append(tags, language.Raw.MustParse(s))
|
||||
})
|
||||
for _, s := range ts.long {
|
||||
tags = append(tags, language.Raw.MustParse(s))
|
||||
}
|
||||
return tags
|
||||
}
|
||||
|
||||
func supportedScripts() []language.Script {
|
||||
scr := make([]language.Script, 0, scriptIndex.len())
|
||||
scriptIndex.keys(func(s string) {
|
||||
scr = append(scr, language.MustParseScript(s))
|
||||
})
|
||||
return scr
|
||||
}
|
||||
|
||||
func supportedRegions() []language.Region {
|
||||
reg := make([]language.Region, 0, regionIndex.len())
|
||||
regionIndex.keys(func(s string) {
|
||||
reg = append(reg, language.MustParseRegion(s))
|
||||
})
|
||||
return reg
|
||||
}
|
||||
|
||||
// tagIndex holds a concatenated lists of subtags of length 2 to 4, one string
|
||||
// for each length, which can be used in combination with binary search to get
|
||||
// the index associated with a tag.
|
||||
// For example, a tagIndex{
|
||||
// "arenesfrruzh", // 6 2-byte tags.
|
||||
// "barwae", // 2 3-byte tags.
|
||||
// "",
|
||||
// }
|
||||
// would mean that the 2-byte tag "fr" had an index of 3, and the 3-byte tag
|
||||
// "wae" had an index of 7.
|
||||
type tagIndex [3]string
|
||||
|
||||
func (t *tagIndex) index(s string) int {
|
||||
sz := len(s)
|
||||
if sz < 2 || 4 < sz {
|
||||
return -1
|
||||
}
|
||||
a := t[sz-2]
|
||||
index := sort.Search(len(a)/sz, func(i int) bool {
|
||||
p := i * sz
|
||||
return a[p:p+sz] >= s
|
||||
})
|
||||
p := index * sz
|
||||
if end := p + sz; end > len(a) || a[p:end] != s {
|
||||
return -1
|
||||
}
|
||||
// Add the number of tags for smaller sizes.
|
||||
for i := 0; i < sz-2; i++ {
|
||||
index += len(t[i]) / (i + 2)
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
// len returns the number of tags that are contained in the tagIndex.
|
||||
func (t *tagIndex) len() (n int) {
|
||||
for i, s := range t {
|
||||
n += len(s) / (i + 2)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// keys calls f for each tag.
|
||||
func (t *tagIndex) keys(f func(key string)) {
|
||||
for i, s := range *t {
|
||||
for ; s != ""; s = s[i+2:] {
|
||||
f(s[:i+2])
|
||||
}
|
||||
}
|
||||
}
|
602
vendor/golang.org/x/text/language/display/maketables.go
generated
vendored
Normal file
602
vendor/golang.org/x/text/language/display/maketables.go
generated
vendored
Normal file
@ -0,0 +1,602 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// Generator for display name tables.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
test = flag.Bool("test", false,
|
||||
"test existing tables; can be used to compare web data with package data.")
|
||||
outputFile = flag.String("output", "tables.go", "output file")
|
||||
|
||||
stats = flag.Bool("stats", false, "prints statistics to stderr")
|
||||
|
||||
short = flag.Bool("short", false, `Use "short" alternatives, when available.`)
|
||||
draft = flag.String("draft",
|
||||
"contributed",
|
||||
`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
|
||||
pkg = flag.String("package",
|
||||
"display",
|
||||
"the name of the package in which the generated file is to be included")
|
||||
|
||||
tags = newTagSet("tags",
|
||||
[]language.Tag{},
|
||||
"space-separated list of tags to include or empty for all")
|
||||
dict = newTagSet("dict",
|
||||
dictTags(),
|
||||
"space-separated list or tags for which to include a Dictionary. "+
|
||||
`"" means the common list from go.text/language.`)
|
||||
)
|
||||
|
||||
func dictTags() (tag []language.Tag) {
|
||||
// TODO: replace with language.Common.Tags() once supported.
|
||||
const str = "af am ar ar-001 az bg bn ca cs da de el en en-US en-GB " +
|
||||
"es es-ES es-419 et fa fi fil fr fr-CA gu he hi hr hu hy id is it ja " +
|
||||
"ka kk km kn ko ky lo lt lv mk ml mn mr ms my ne nl no pa pl pt pt-BR " +
|
||||
"pt-PT ro ru si sk sl sq sr sr-Latn sv sw ta te th tr uk ur uz vi " +
|
||||
"zh zh-Hans zh-Hant zu"
|
||||
|
||||
for _, s := range strings.Split(str, " ") {
|
||||
tag = append(tag, language.MustParse(s))
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
|
||||
// Read the CLDR zip file.
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
defer r.Close()
|
||||
|
||||
d := &cldr.Decoder{}
|
||||
d.SetDirFilter("main", "supplemental")
|
||||
d.SetSectionFilter("localeDisplayNames")
|
||||
data, err := d.DecodeZip(r)
|
||||
if err != nil {
|
||||
log.Fatalf("DecodeZip: %v", err)
|
||||
}
|
||||
|
||||
w := gen.NewCodeWriter()
|
||||
defer w.WriteGoFile(*outputFile, "display")
|
||||
|
||||
gen.WriteCLDRVersion(w)
|
||||
|
||||
b := builder{
|
||||
w: w,
|
||||
data: data,
|
||||
group: make(map[string]*group),
|
||||
}
|
||||
b.generate()
|
||||
}
|
||||
|
||||
const tagForm = language.All
|
||||
|
||||
// tagSet is used to parse command line flags of tags. It implements the
|
||||
// flag.Value interface.
|
||||
type tagSet map[language.Tag]bool
|
||||
|
||||
func newTagSet(name string, tags []language.Tag, usage string) tagSet {
|
||||
f := tagSet(make(map[language.Tag]bool))
|
||||
for _, t := range tags {
|
||||
f[t] = true
|
||||
}
|
||||
flag.Var(f, name, usage)
|
||||
return f
|
||||
}
|
||||
|
||||
// String implements the String method of the flag.Value interface.
|
||||
func (f tagSet) String() string {
|
||||
tags := []string{}
|
||||
for t := range f {
|
||||
tags = append(tags, t.String())
|
||||
}
|
||||
sort.Strings(tags)
|
||||
return strings.Join(tags, " ")
|
||||
}
|
||||
|
||||
// Set implements Set from the flag.Value interface.
|
||||
func (f tagSet) Set(s string) error {
|
||||
if s != "" {
|
||||
for _, s := range strings.Split(s, " ") {
|
||||
if s != "" {
|
||||
tag, err := tagForm.Parse(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f[tag] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f tagSet) contains(t language.Tag) bool {
|
||||
if len(f) == 0 {
|
||||
return true
|
||||
}
|
||||
return f[t]
|
||||
}
|
||||
|
||||
// builder is used to create all tables with display name information.
|
||||
type builder struct {
|
||||
w *gen.CodeWriter
|
||||
|
||||
data *cldr.CLDR
|
||||
|
||||
fromLocs []string
|
||||
|
||||
// destination tags for the current locale.
|
||||
toTags []string
|
||||
toTagIndex map[string]int
|
||||
|
||||
// list of supported tags
|
||||
supported []language.Tag
|
||||
|
||||
// key-value pairs per group
|
||||
group map[string]*group
|
||||
|
||||
// statistics
|
||||
sizeIndex int // total size of all indexes of headers
|
||||
sizeData int // total size of all data of headers
|
||||
totalSize int
|
||||
}
|
||||
|
||||
type group struct {
|
||||
// Maps from a given language to the Namer data for this language.
|
||||
lang map[language.Tag]keyValues
|
||||
headers []header
|
||||
|
||||
toTags []string
|
||||
threeStart int
|
||||
fourPlusStart int
|
||||
}
|
||||
|
||||
// set sets the typ to the name for locale loc.
|
||||
func (g *group) set(t language.Tag, typ, name string) {
|
||||
kv := g.lang[t]
|
||||
if kv == nil {
|
||||
kv = make(keyValues)
|
||||
g.lang[t] = kv
|
||||
}
|
||||
if kv[typ] == "" {
|
||||
kv[typ] = name
|
||||
}
|
||||
}
|
||||
|
||||
type keyValues map[string]string
|
||||
|
||||
type header struct {
|
||||
tag language.Tag
|
||||
data string
|
||||
index []uint16
|
||||
}
|
||||
|
||||
var versionInfo = `// Version is deprecated. Use CLDRVersion.
|
||||
const Version = %#v
|
||||
|
||||
`
|
||||
|
||||
var self = language.MustParse("mul")
|
||||
|
||||
// generate builds and writes all tables.
|
||||
func (b *builder) generate() {
|
||||
fmt.Fprintf(b.w, versionInfo, cldr.Version)
|
||||
|
||||
b.filter()
|
||||
b.setData("lang", func(g *group, loc language.Tag, ldn *cldr.LocaleDisplayNames) {
|
||||
if ldn.Languages != nil {
|
||||
for _, v := range ldn.Languages.Language {
|
||||
lang := v.Type
|
||||
if lang == "root" {
|
||||
// We prefer the data from "und"
|
||||
// TODO: allow both the data for root and und somehow.
|
||||
continue
|
||||
}
|
||||
tag := tagForm.MustParse(lang)
|
||||
if tags.contains(tag) {
|
||||
g.set(loc, tag.String(), v.Data())
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
b.setData("script", func(g *group, loc language.Tag, ldn *cldr.LocaleDisplayNames) {
|
||||
if ldn.Scripts != nil {
|
||||
for _, v := range ldn.Scripts.Script {
|
||||
code := language.MustParseScript(v.Type)
|
||||
if code.IsPrivateUse() { // Qaaa..Qabx
|
||||
// TODO: data currently appears to be very meager.
|
||||
// Reconsider if we have data for English.
|
||||
if loc == language.English {
|
||||
log.Fatal("Consider including data for private use scripts.")
|
||||
}
|
||||
continue
|
||||
}
|
||||
g.set(loc, code.String(), v.Data())
|
||||
}
|
||||
}
|
||||
})
|
||||
b.setData("region", func(g *group, loc language.Tag, ldn *cldr.LocaleDisplayNames) {
|
||||
if ldn.Territories != nil {
|
||||
for _, v := range ldn.Territories.Territory {
|
||||
g.set(loc, language.MustParseRegion(v.Type).String(), v.Data())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.makeSupported()
|
||||
|
||||
b.writeParents()
|
||||
|
||||
b.writeGroup("lang")
|
||||
b.writeGroup("script")
|
||||
b.writeGroup("region")
|
||||
|
||||
b.w.WriteConst("numSupported", len(b.supported))
|
||||
buf := bytes.Buffer{}
|
||||
for _, tag := range b.supported {
|
||||
fmt.Fprint(&buf, tag.String(), "|")
|
||||
}
|
||||
b.w.WriteConst("supported", buf.String())
|
||||
|
||||
b.writeDictionaries()
|
||||
|
||||
b.supported = []language.Tag{self}
|
||||
|
||||
// Compute the names of locales in their own language. Some of these names
|
||||
// may be specified in their parent locales. We iterate the maximum depth
|
||||
// of the parent three times to match successive parents of tags until a
|
||||
// possible match is found.
|
||||
for i := 0; i < 4; i++ {
|
||||
b.setData("self", func(g *group, tag language.Tag, ldn *cldr.LocaleDisplayNames) {
|
||||
parent := tag
|
||||
if b, s, r := tag.Raw(); i > 0 && (s != language.Script{} && r == language.Region{}) {
|
||||
parent, _ = language.Raw.Compose(b)
|
||||
}
|
||||
if ldn.Languages != nil {
|
||||
for _, v := range ldn.Languages.Language {
|
||||
key := tagForm.MustParse(v.Type)
|
||||
saved := key
|
||||
if key == parent {
|
||||
g.set(self, tag.String(), v.Data())
|
||||
}
|
||||
for k := 0; k < i; k++ {
|
||||
key = key.Parent()
|
||||
}
|
||||
if key == tag {
|
||||
g.set(self, saved.String(), v.Data()) // set does not overwrite a value.
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
b.writeGroup("self")
|
||||
}
|
||||
|
||||
func (b *builder) setData(name string, f func(*group, language.Tag, *cldr.LocaleDisplayNames)) {
|
||||
b.sizeIndex = 0
|
||||
b.sizeData = 0
|
||||
b.toTags = nil
|
||||
b.fromLocs = nil
|
||||
b.toTagIndex = make(map[string]int)
|
||||
|
||||
g := b.group[name]
|
||||
if g == nil {
|
||||
g = &group{lang: make(map[language.Tag]keyValues)}
|
||||
b.group[name] = g
|
||||
}
|
||||
for _, loc := range b.data.Locales() {
|
||||
// We use RawLDML instead of LDML as we are managing our own inheritance
|
||||
// in this implementation.
|
||||
ldml := b.data.RawLDML(loc)
|
||||
|
||||
// We do not support the POSIX variant (it is not a supported BCP 47
|
||||
// variant). This locale also doesn't happen to contain any data, so
|
||||
// we'll skip it by checking for this.
|
||||
tag, err := tagForm.Parse(loc)
|
||||
if err != nil {
|
||||
if ldml.LocaleDisplayNames != nil {
|
||||
log.Fatalf("setData: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ldml.LocaleDisplayNames != nil && tags.contains(tag) {
|
||||
f(g, tag, ldml.LocaleDisplayNames)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *builder) filter() {
|
||||
filter := func(s *cldr.Slice) {
|
||||
if *short {
|
||||
s.SelectOnePerGroup("alt", []string{"short", ""})
|
||||
} else {
|
||||
s.SelectOnePerGroup("alt", []string{"stand-alone", ""})
|
||||
}
|
||||
d, err := cldr.ParseDraft(*draft)
|
||||
if err != nil {
|
||||
log.Fatalf("filter: %v", err)
|
||||
}
|
||||
s.SelectDraft(d)
|
||||
}
|
||||
for _, loc := range b.data.Locales() {
|
||||
if ldn := b.data.RawLDML(loc).LocaleDisplayNames; ldn != nil {
|
||||
if ldn.Languages != nil {
|
||||
s := cldr.MakeSlice(&ldn.Languages.Language)
|
||||
if filter(&s); len(ldn.Languages.Language) == 0 {
|
||||
ldn.Languages = nil
|
||||
}
|
||||
}
|
||||
if ldn.Scripts != nil {
|
||||
s := cldr.MakeSlice(&ldn.Scripts.Script)
|
||||
if filter(&s); len(ldn.Scripts.Script) == 0 {
|
||||
ldn.Scripts = nil
|
||||
}
|
||||
}
|
||||
if ldn.Territories != nil {
|
||||
s := cldr.MakeSlice(&ldn.Territories.Territory)
|
||||
if filter(&s); len(ldn.Territories.Territory) == 0 {
|
||||
ldn.Territories = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// makeSupported creates a list of all supported locales.
|
||||
func (b *builder) makeSupported() {
|
||||
// tags across groups
|
||||
for _, g := range b.group {
|
||||
for t, _ := range g.lang {
|
||||
b.supported = append(b.supported, t)
|
||||
}
|
||||
}
|
||||
b.supported = b.supported[:unique(tagsSorter(b.supported))]
|
||||
|
||||
}
|
||||
|
||||
type tagsSorter []language.Tag
|
||||
|
||||
func (a tagsSorter) Len() int { return len(a) }
|
||||
func (a tagsSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a tagsSorter) Less(i, j int) bool { return a[i].String() < a[j].String() }
|
||||
|
||||
func (b *builder) writeGroup(name string) {
|
||||
g := b.group[name]
|
||||
|
||||
for _, kv := range g.lang {
|
||||
for t, _ := range kv {
|
||||
g.toTags = append(g.toTags, t)
|
||||
}
|
||||
}
|
||||
g.toTags = g.toTags[:unique(tagsBySize(g.toTags))]
|
||||
|
||||
// Allocate header per supported value.
|
||||
g.headers = make([]header, len(b.supported))
|
||||
for i, sup := range b.supported {
|
||||
kv, ok := g.lang[sup]
|
||||
if !ok {
|
||||
g.headers[i].tag = sup
|
||||
continue
|
||||
}
|
||||
data := []byte{}
|
||||
index := make([]uint16, len(g.toTags), len(g.toTags)+1)
|
||||
for j, t := range g.toTags {
|
||||
index[j] = uint16(len(data))
|
||||
data = append(data, kv[t]...)
|
||||
}
|
||||
index = append(index, uint16(len(data)))
|
||||
|
||||
// Trim the tail of the index.
|
||||
// TODO: indexes can be reduced in size quite a bit more.
|
||||
n := len(index)
|
||||
for ; n >= 2 && index[n-2] == index[n-1]; n-- {
|
||||
}
|
||||
index = index[:n]
|
||||
|
||||
// Workaround for a bug in CLDR 26.
|
||||
// See http://unicode.org/cldr/trac/ticket/8042.
|
||||
if cldr.Version == "26" && sup.String() == "hsb" {
|
||||
data = bytes.Replace(data, []byte{'"'}, nil, 1)
|
||||
}
|
||||
g.headers[i] = header{sup, string(data), index}
|
||||
}
|
||||
g.writeTable(b.w, name)
|
||||
}
|
||||
|
||||
type tagsBySize []string
|
||||
|
||||
func (l tagsBySize) Len() int { return len(l) }
|
||||
func (l tagsBySize) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l tagsBySize) Less(i, j int) bool {
|
||||
a, b := l[i], l[j]
|
||||
// Sort single-tag entries based on size first. Otherwise alphabetic.
|
||||
if len(a) != len(b) && (len(a) <= 4 || len(b) <= 4) {
|
||||
return len(a) < len(b)
|
||||
}
|
||||
return a < b
|
||||
}
|
||||
|
||||
// parentIndices returns slice a of len(tags) where tags[a[i]] is the parent
|
||||
// of tags[i].
|
||||
func parentIndices(tags []language.Tag) []int16 {
|
||||
index := make(map[language.Tag]int16)
|
||||
for i, t := range tags {
|
||||
index[t] = int16(i)
|
||||
}
|
||||
|
||||
// Construct default parents.
|
||||
parents := make([]int16, len(tags))
|
||||
for i, t := range tags {
|
||||
parents[i] = -1
|
||||
for t = t.Parent(); t != language.Und; t = t.Parent() {
|
||||
if j, ok := index[t]; ok {
|
||||
parents[i] = j
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return parents
|
||||
}
|
||||
|
||||
func (b *builder) writeParents() {
|
||||
parents := parentIndices(b.supported)
|
||||
fmt.Fprintf(b.w, "var parents = ")
|
||||
b.w.WriteArray(parents)
|
||||
}
|
||||
|
||||
// writeKeys writes keys to a special index used by the display package.
|
||||
// tags are assumed to be sorted by length.
|
||||
func writeKeys(w *gen.CodeWriter, name string, keys []string) {
|
||||
w.Size += int(3 * reflect.TypeOf("").Size())
|
||||
w.WriteComment("Number of keys: %d", len(keys))
|
||||
fmt.Fprintf(w, "var (\n\t%sIndex = tagIndex{\n", name)
|
||||
for i := 2; i <= 4; i++ {
|
||||
sub := []string{}
|
||||
for _, t := range keys {
|
||||
if len(t) != i {
|
||||
break
|
||||
}
|
||||
sub = append(sub, t)
|
||||
}
|
||||
s := strings.Join(sub, "")
|
||||
w.WriteString(s)
|
||||
fmt.Fprintf(w, ",\n")
|
||||
keys = keys[len(sub):]
|
||||
}
|
||||
fmt.Fprintln(w, "\t}")
|
||||
if len(keys) > 0 {
|
||||
w.Size += int(reflect.TypeOf([]string{}).Size())
|
||||
fmt.Fprintf(w, "\t%sTagsLong = ", name)
|
||||
w.WriteSlice(keys)
|
||||
}
|
||||
fmt.Fprintln(w, ")\n")
|
||||
}
|
||||
|
||||
// identifier creates an identifier from the given tag.
|
||||
func identifier(t language.Tag) string {
|
||||
return strings.Replace(t.String(), "-", "", -1)
|
||||
}
|
||||
|
||||
func (h *header) writeEntry(w *gen.CodeWriter, name string) {
|
||||
if len(dict) > 0 && dict.contains(h.tag) {
|
||||
fmt.Fprintf(w, "\t{ // %s\n", h.tag)
|
||||
fmt.Fprintf(w, "\t\t%[1]s%[2]sStr,\n\t\t%[1]s%[2]sIdx,\n", identifier(h.tag), name)
|
||||
fmt.Fprintln(w, "\t},")
|
||||
} else if len(h.data) == 0 {
|
||||
fmt.Fprintln(w, "\t\t{}, //", h.tag)
|
||||
} else {
|
||||
fmt.Fprintf(w, "\t{ // %s\n", h.tag)
|
||||
w.WriteString(h.data)
|
||||
fmt.Fprintln(w, ",")
|
||||
w.WriteSlice(h.index)
|
||||
fmt.Fprintln(w, ",\n\t},")
|
||||
}
|
||||
}
|
||||
|
||||
// write the data for the given header as single entries. The size for this data
|
||||
// was already accounted for in writeEntry.
|
||||
func (h *header) writeSingle(w *gen.CodeWriter, name string) {
|
||||
if len(dict) > 0 && dict.contains(h.tag) {
|
||||
tag := identifier(h.tag)
|
||||
w.WriteConst(tag+name+"Str", h.data)
|
||||
|
||||
// Note that we create a slice instead of an array. If we use an array
|
||||
// we need to refer to it as a[:] in other tables, which will cause the
|
||||
// array to always be included by the linker. See Issue 7651.
|
||||
w.WriteVar(tag+name+"Idx", h.index)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteTable writes an entry for a single Namer.
|
||||
func (g *group) writeTable(w *gen.CodeWriter, name string) {
|
||||
start := w.Size
|
||||
writeKeys(w, name, g.toTags)
|
||||
w.Size += len(g.headers) * int(reflect.ValueOf(g.headers[0]).Type().Size())
|
||||
|
||||
fmt.Fprintf(w, "var %sHeaders = [%d]header{\n", name, len(g.headers))
|
||||
|
||||
title := strings.Title(name)
|
||||
for _, h := range g.headers {
|
||||
h.writeEntry(w, title)
|
||||
}
|
||||
fmt.Fprintln(w, "}\n")
|
||||
|
||||
for _, h := range g.headers {
|
||||
h.writeSingle(w, title)
|
||||
}
|
||||
n := w.Size - start
|
||||
fmt.Fprintf(w, "// Total size for %s: %d bytes (%d KB)\n\n", name, n, n/1000)
|
||||
}
|
||||
|
||||
func (b *builder) writeDictionaries() {
|
||||
fmt.Fprintln(b.w, "// Dictionary entries of frequent languages")
|
||||
fmt.Fprintln(b.w, "var (")
|
||||
parents := parentIndices(b.supported)
|
||||
|
||||
for i, t := range b.supported {
|
||||
if dict.contains(t) {
|
||||
ident := identifier(t)
|
||||
fmt.Fprintf(b.w, "\t%s = Dictionary{ // %s\n", ident, t)
|
||||
if p := parents[i]; p == -1 {
|
||||
fmt.Fprintln(b.w, "\t\tnil,")
|
||||
} else {
|
||||
fmt.Fprintf(b.w, "\t\t&%s,\n", identifier(b.supported[p]))
|
||||
}
|
||||
fmt.Fprintf(b.w, "\t\theader{%[1]sLangStr, %[1]sLangIdx},\n", ident)
|
||||
fmt.Fprintf(b.w, "\t\theader{%[1]sScriptStr, %[1]sScriptIdx},\n", ident)
|
||||
fmt.Fprintf(b.w, "\t\theader{%[1]sRegionStr, %[1]sRegionIdx},\n", ident)
|
||||
fmt.Fprintln(b.w, "\t}")
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(b.w, ")")
|
||||
|
||||
var s string
|
||||
var a []uint16
|
||||
sz := reflect.TypeOf(s).Size()
|
||||
sz += reflect.TypeOf(a).Size()
|
||||
sz *= 3
|
||||
sz += reflect.TypeOf(&a).Size()
|
||||
n := int(sz) * len(dict)
|
||||
fmt.Fprintf(b.w, "// Total size for %d entries: %d bytes (%d KB)\n\n", len(dict), n, n/1000)
|
||||
|
||||
b.w.Size += n
|
||||
}
|
||||
|
||||
// unique sorts the given lists and removes duplicate entries by swapping them
|
||||
// past position k, where k is the number of unique values. It returns k.
|
||||
func unique(a sort.Interface) int {
|
||||
if a.Len() == 0 {
|
||||
return 0
|
||||
}
|
||||
sort.Sort(a)
|
||||
k := 1
|
||||
for i := 1; i < a.Len(); i++ {
|
||||
if a.Less(k-1, i) {
|
||||
if k != i {
|
||||
a.Swap(k, i)
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
return k
|
||||
}
|
53114
vendor/golang.org/x/text/language/display/tables.go
generated
vendored
Normal file
53114
vendor/golang.org/x/text/language/display/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
Normal file
102
vendor/golang.org/x/text/language/doc.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package language implements BCP 47 language tags and related functionality.
|
||||
//
|
||||
// The most important function of package language is to match a list of
|
||||
// user-preferred languages to a list of supported languages.
|
||||
// It alleviates the developer of dealing with the complexity of this process
|
||||
// and provides the user with the best experience
|
||||
// (see https://blog.golang.org/matchlang).
|
||||
//
|
||||
//
|
||||
// Matching preferred against supported languages
|
||||
//
|
||||
// A Matcher for an application that supports English, Australian English,
|
||||
// Danish, and standard Mandarin can be created as follows:
|
||||
//
|
||||
// var matcher = language.NewMatcher([]language.Tag{
|
||||
// language.English, // The first language is used as fallback.
|
||||
// language.MustParse("en-AU"),
|
||||
// language.Danish,
|
||||
// language.Chinese,
|
||||
// })
|
||||
//
|
||||
// This list of supported languages is typically implied by the languages for
|
||||
// which there exists translations of the user interface.
|
||||
//
|
||||
// User-preferred languages usually come as a comma-separated list of BCP 47
|
||||
// language tags.
|
||||
// The MatchString finds best matches for such strings:
|
||||
//
|
||||
// handler(w http.ResponseWriter, r *http.Request) {
|
||||
// lang, _ := r.Cookie("lang")
|
||||
// accept := r.Header.Get("Accept-Language")
|
||||
// tag, _ := language.MatchStrings(matcher, lang.String(), accept)
|
||||
//
|
||||
// // tag should now be used for the initialization of any
|
||||
// // locale-specific service.
|
||||
// }
|
||||
//
|
||||
// The Matcher's Match method can be used to match Tags directly.
|
||||
//
|
||||
// Matchers are aware of the intricacies of equivalence between languages, such
|
||||
// as deprecated subtags, legacy tags, macro languages, mutual
|
||||
// intelligibility between scripts and languages, and transparently passing
|
||||
// BCP 47 user configuration.
|
||||
// For instance, it will know that a reader of Bokmål Danish can read Norwegian
|
||||
// and will know that Cantonese ("yue") is a good match for "zh-HK".
|
||||
//
|
||||
//
|
||||
// Using match results
|
||||
//
|
||||
// To guarantee a consistent user experience to the user it is important to
|
||||
// use the same language tag for the selection of any locale-specific services.
|
||||
// For example, it is utterly confusing to substitute spelled-out numbers
|
||||
// or dates in one language in text of another language.
|
||||
// More subtly confusing is using the wrong sorting order or casing
|
||||
// algorithm for a certain language.
|
||||
//
|
||||
// All the packages in x/text that provide locale-specific services
|
||||
// (e.g. collate, cases) should be initialized with the tag that was
|
||||
// obtained at the start of an interaction with the user.
|
||||
//
|
||||
// Note that Tag that is returned by Match and MatchString may differ from any
|
||||
// of the supported languages, as it may contain carried over settings from
|
||||
// the user tags.
|
||||
// This may be inconvenient when your application has some additional
|
||||
// locale-specific data for your supported languages.
|
||||
// Match and MatchString both return the index of the matched supported tag
|
||||
// to simplify associating such data with the matched tag.
|
||||
//
|
||||
//
|
||||
// Canonicalization
|
||||
//
|
||||
// If one uses the Matcher to compare languages one does not need to
|
||||
// worry about canonicalization.
|
||||
//
|
||||
// The meaning of a Tag varies per application. The language package
|
||||
// therefore delays canonicalization and preserves information as much
|
||||
// as possible. The Matcher, however, will always take into account that
|
||||
// two different tags may represent the same language.
|
||||
//
|
||||
// By default, only legacy and deprecated tags are converted into their
|
||||
// canonical equivalent. All other information is preserved. This approach makes
|
||||
// the confidence scores more accurate and allows matchers to distinguish
|
||||
// between variants that are otherwise lost.
|
||||
//
|
||||
// As a consequence, two tags that should be treated as identical according to
|
||||
// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The
|
||||
// Matcher handles such distinctions, though, and is aware of the
|
||||
// equivalence relations. The CanonType type can be used to alter the
|
||||
// canonicalization form.
|
||||
//
|
||||
// References
|
||||
//
|
||||
// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47
|
||||
//
|
||||
package language // import "golang.org/x/text/language"
|
||||
|
||||
// TODO: explanation on how to match languages for your own locale-specific
|
||||
// service.
|
413
vendor/golang.org/x/text/language/examples_test.go
generated
vendored
Normal file
413
vendor/golang.org/x/text/language/examples_test.go
generated
vendored
Normal file
@ -0,0 +1,413 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
func ExampleCanonType() {
|
||||
p := func(id string) {
|
||||
fmt.Printf("Default(%s) -> %s\n", id, language.Make(id))
|
||||
fmt.Printf("BCP47(%s) -> %s\n", id, language.BCP47.Make(id))
|
||||
fmt.Printf("Macro(%s) -> %s\n", id, language.Macro.Make(id))
|
||||
fmt.Printf("All(%s) -> %s\n", id, language.All.Make(id))
|
||||
}
|
||||
p("en-Latn")
|
||||
p("sh")
|
||||
p("zh-cmn")
|
||||
p("bjd")
|
||||
p("iw-Latn-fonipa-u-cu-usd")
|
||||
// Output:
|
||||
// Default(en-Latn) -> en-Latn
|
||||
// BCP47(en-Latn) -> en
|
||||
// Macro(en-Latn) -> en-Latn
|
||||
// All(en-Latn) -> en
|
||||
// Default(sh) -> sr-Latn
|
||||
// BCP47(sh) -> sh
|
||||
// Macro(sh) -> sh
|
||||
// All(sh) -> sr-Latn
|
||||
// Default(zh-cmn) -> cmn
|
||||
// BCP47(zh-cmn) -> cmn
|
||||
// Macro(zh-cmn) -> zh
|
||||
// All(zh-cmn) -> zh
|
||||
// Default(bjd) -> drl
|
||||
// BCP47(bjd) -> drl
|
||||
// Macro(bjd) -> bjd
|
||||
// All(bjd) -> drl
|
||||
// Default(iw-Latn-fonipa-u-cu-usd) -> he-Latn-fonipa-u-cu-usd
|
||||
// BCP47(iw-Latn-fonipa-u-cu-usd) -> he-Latn-fonipa-u-cu-usd
|
||||
// Macro(iw-Latn-fonipa-u-cu-usd) -> iw-Latn-fonipa-u-cu-usd
|
||||
// All(iw-Latn-fonipa-u-cu-usd) -> he-Latn-fonipa-u-cu-usd
|
||||
}
|
||||
|
||||
func ExampleTag_Base() {
|
||||
fmt.Println(language.Make("und").Base())
|
||||
fmt.Println(language.Make("und-US").Base())
|
||||
fmt.Println(language.Make("und-NL").Base())
|
||||
fmt.Println(language.Make("und-419").Base()) // Latin America
|
||||
fmt.Println(language.Make("und-ZZ").Base())
|
||||
// Output:
|
||||
// en Low
|
||||
// en High
|
||||
// nl High
|
||||
// es Low
|
||||
// en Low
|
||||
}
|
||||
|
||||
func ExampleTag_Script() {
|
||||
en := language.Make("en")
|
||||
sr := language.Make("sr")
|
||||
sr_Latn := language.Make("sr_Latn")
|
||||
fmt.Println(en.Script())
|
||||
fmt.Println(sr.Script())
|
||||
// Was a script explicitly specified?
|
||||
_, c := sr.Script()
|
||||
fmt.Println(c == language.Exact)
|
||||
_, c = sr_Latn.Script()
|
||||
fmt.Println(c == language.Exact)
|
||||
// Output:
|
||||
// Latn High
|
||||
// Cyrl Low
|
||||
// false
|
||||
// true
|
||||
}
|
||||
|
||||
func ExampleTag_Region() {
|
||||
ru := language.Make("ru")
|
||||
en := language.Make("en")
|
||||
fmt.Println(ru.Region())
|
||||
fmt.Println(en.Region())
|
||||
// Output:
|
||||
// RU Low
|
||||
// US Low
|
||||
}
|
||||
|
||||
func ExampleRegion_TLD() {
|
||||
us := language.MustParseRegion("US")
|
||||
gb := language.MustParseRegion("GB")
|
||||
uk := language.MustParseRegion("UK")
|
||||
bu := language.MustParseRegion("BU")
|
||||
|
||||
fmt.Println(us.TLD())
|
||||
fmt.Println(gb.TLD())
|
||||
fmt.Println(uk.TLD())
|
||||
fmt.Println(bu.TLD())
|
||||
|
||||
fmt.Println(us.Canonicalize().TLD())
|
||||
fmt.Println(gb.Canonicalize().TLD())
|
||||
fmt.Println(uk.Canonicalize().TLD())
|
||||
fmt.Println(bu.Canonicalize().TLD())
|
||||
// Output:
|
||||
// US <nil>
|
||||
// UK <nil>
|
||||
// UK <nil>
|
||||
// ZZ language: region is not a valid ccTLD
|
||||
// US <nil>
|
||||
// UK <nil>
|
||||
// UK <nil>
|
||||
// MM <nil>
|
||||
}
|
||||
|
||||
func ExampleCompose() {
|
||||
nl, _ := language.ParseBase("nl")
|
||||
us, _ := language.ParseRegion("US")
|
||||
de := language.Make("de-1901-u-co-phonebk")
|
||||
jp := language.Make("ja-JP")
|
||||
fi := language.Make("fi-x-ing")
|
||||
|
||||
u, _ := language.ParseExtension("u-nu-arabic")
|
||||
x, _ := language.ParseExtension("x-piglatin")
|
||||
|
||||
// Combine a base language and region.
|
||||
fmt.Println(language.Compose(nl, us))
|
||||
// Combine a base language and extension.
|
||||
fmt.Println(language.Compose(nl, x))
|
||||
// Replace the region.
|
||||
fmt.Println(language.Compose(jp, us))
|
||||
// Combine several tags.
|
||||
fmt.Println(language.Compose(us, nl, u))
|
||||
|
||||
// Replace the base language of a tag.
|
||||
fmt.Println(language.Compose(de, nl))
|
||||
fmt.Println(language.Compose(de, nl, u))
|
||||
// Remove the base language.
|
||||
fmt.Println(language.Compose(de, language.Base{}))
|
||||
// Remove all variants.
|
||||
fmt.Println(language.Compose(de, []language.Variant{}))
|
||||
// Remove all extensions.
|
||||
fmt.Println(language.Compose(de, []language.Extension{}))
|
||||
fmt.Println(language.Compose(fi, []language.Extension{}))
|
||||
// Remove all variants and extensions.
|
||||
fmt.Println(language.Compose(de.Raw()))
|
||||
|
||||
// An error is gobbled or returned if non-nil.
|
||||
fmt.Println(language.Compose(language.ParseRegion("ZA")))
|
||||
fmt.Println(language.Compose(language.ParseRegion("HH")))
|
||||
|
||||
// Compose uses the same Default canonicalization as Make.
|
||||
fmt.Println(language.Compose(language.Raw.Parse("en-Latn-UK")))
|
||||
|
||||
// Call compose on a different CanonType for different results.
|
||||
fmt.Println(language.All.Compose(language.Raw.Parse("en-Latn-UK")))
|
||||
|
||||
// Output:
|
||||
// nl-US <nil>
|
||||
// nl-x-piglatin <nil>
|
||||
// ja-US <nil>
|
||||
// nl-US-u-nu-arabic <nil>
|
||||
// nl-1901-u-co-phonebk <nil>
|
||||
// nl-1901-u-nu-arabic <nil>
|
||||
// und-1901-u-co-phonebk <nil>
|
||||
// de-u-co-phonebk <nil>
|
||||
// de-1901 <nil>
|
||||
// fi <nil>
|
||||
// de <nil>
|
||||
// und-ZA <nil>
|
||||
// und language: subtag "HH" is well-formed but unknown
|
||||
// en-Latn-GB <nil>
|
||||
// en-GB <nil>
|
||||
}
|
||||
|
||||
func ExampleParse_errors() {
|
||||
for _, s := range []string{"Foo", "Bar", "Foobar"} {
|
||||
_, err := language.Parse(s)
|
||||
if err != nil {
|
||||
if inv, ok := err.(language.ValueError); ok {
|
||||
fmt.Println(inv.Subtag())
|
||||
} else {
|
||||
fmt.Println(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, s := range []string{"en", "aa-Uuuu", "AC", "ac-u"} {
|
||||
_, err := language.Parse(s)
|
||||
switch e := err.(type) {
|
||||
case language.ValueError:
|
||||
fmt.Printf("%s: culprit %q\n", s, e.Subtag())
|
||||
case nil:
|
||||
// No error.
|
||||
default:
|
||||
// A syntax error.
|
||||
fmt.Printf("%s: ill-formed\n", s)
|
||||
}
|
||||
}
|
||||
// Output:
|
||||
// foo
|
||||
// Foobar
|
||||
// aa-Uuuu: culprit "Uuuu"
|
||||
// AC: culprit "ac"
|
||||
// ac-u: ill-formed
|
||||
}
|
||||
|
||||
func ExampleParent() {
|
||||
p := func(tag string) {
|
||||
fmt.Printf("parent(%v): %v\n", tag, language.Make(tag).Parent())
|
||||
}
|
||||
p("zh-CN")
|
||||
|
||||
// Australian English inherits from World English.
|
||||
p("en-AU")
|
||||
|
||||
// If the tag has a different maximized script from its parent, a tag with
|
||||
// this maximized script is inserted. This allows different language tags
|
||||
// which have the same base language and script in common to inherit from
|
||||
// a common set of settings.
|
||||
p("zh-HK")
|
||||
|
||||
// If the maximized script of the parent is not identical, CLDR will skip
|
||||
// inheriting from it, as it means there will not be many entries in common
|
||||
// and inheriting from it is nonsensical.
|
||||
p("zh-Hant")
|
||||
|
||||
// The parent of a tag with variants and extensions is the tag with all
|
||||
// variants and extensions removed.
|
||||
p("de-1994-u-co-phonebk")
|
||||
|
||||
// Remove default script.
|
||||
p("de-Latn-LU")
|
||||
|
||||
// Output:
|
||||
// parent(zh-CN): zh
|
||||
// parent(en-AU): en-001
|
||||
// parent(zh-HK): zh-Hant
|
||||
// parent(zh-Hant): und
|
||||
// parent(de-1994-u-co-phonebk): de
|
||||
// parent(de-Latn-LU): de
|
||||
}
|
||||
|
||||
// ExampleMatcher_bestMatch gives some examples of getting the best match of
|
||||
// a set of tags to any of the tags of given set.
|
||||
func ExampleMatcher() {
|
||||
// This is the set of tags from which we want to pick the best match. These
|
||||
// can be, for example, the supported languages for some package.
|
||||
tags := []language.Tag{
|
||||
language.English,
|
||||
language.BritishEnglish,
|
||||
language.French,
|
||||
language.Afrikaans,
|
||||
language.BrazilianPortuguese,
|
||||
language.EuropeanPortuguese,
|
||||
language.Croatian,
|
||||
language.SimplifiedChinese,
|
||||
language.Raw.Make("iw-IL"),
|
||||
language.Raw.Make("iw"),
|
||||
language.Raw.Make("he"),
|
||||
}
|
||||
m := language.NewMatcher(tags)
|
||||
|
||||
// A simple match.
|
||||
fmt.Println(m.Match(language.Make("fr")))
|
||||
|
||||
// Australian English is closer to British than American English.
|
||||
fmt.Println(m.Match(language.Make("en-AU")))
|
||||
|
||||
// Default to the first tag passed to the Matcher if there is no match.
|
||||
fmt.Println(m.Match(language.Make("ar")))
|
||||
|
||||
// Get the default tag.
|
||||
fmt.Println(m.Match())
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// Someone specifying sr-Latn is probably fine with getting Croatian.
|
||||
fmt.Println(m.Match(language.Make("sr-Latn")))
|
||||
|
||||
// We match SimplifiedChinese, but with Low confidence.
|
||||
fmt.Println(m.Match(language.TraditionalChinese))
|
||||
|
||||
// Serbian in Latin script is a closer match to Croatian than Traditional
|
||||
// Chinese to Simplified Chinese.
|
||||
fmt.Println(m.Match(language.TraditionalChinese, language.Make("sr-Latn")))
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// In case a multiple variants of a language are available, the most spoken
|
||||
// variant is typically returned.
|
||||
fmt.Println(m.Match(language.Portuguese))
|
||||
|
||||
// Pick the first value passed to Match in case of a tie.
|
||||
fmt.Println(m.Match(language.Dutch, language.Make("fr-BE"), language.Make("af-NA")))
|
||||
fmt.Println(m.Match(language.Dutch, language.Make("af-NA"), language.Make("fr-BE")))
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// If a Matcher is initialized with a language and it's deprecated version,
|
||||
// it will distinguish between them.
|
||||
fmt.Println(m.Match(language.Raw.Make("iw")))
|
||||
|
||||
// However, for non-exact matches, it will treat deprecated versions as
|
||||
// equivalent and consider other factors first.
|
||||
fmt.Println(m.Match(language.Raw.Make("he-IL")))
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// User settings passed to the Unicode extension are ignored for matching
|
||||
// and preserved in the returned tag.
|
||||
fmt.Println(m.Match(language.Make("de-u-co-phonebk"), language.Make("fr-u-cu-frf")))
|
||||
|
||||
// Even if the matching language is different.
|
||||
fmt.Println(m.Match(language.Make("de-u-co-phonebk"), language.Make("br-u-cu-frf")))
|
||||
|
||||
// If there is no matching language, the options of the first preferred tag are used.
|
||||
fmt.Println(m.Match(language.Make("de-u-co-phonebk")))
|
||||
|
||||
// Output:
|
||||
// fr 2 Exact
|
||||
// en-GB 1 High
|
||||
// en 0 No
|
||||
// en 0 No
|
||||
// ----
|
||||
// hr 6 High
|
||||
// zh-Hans 7 Low
|
||||
// hr 6 High
|
||||
// ----
|
||||
// pt-BR 4 High
|
||||
// fr 2 High
|
||||
// af 3 High
|
||||
// ----
|
||||
// iw 9 Exact
|
||||
// he 10 Exact
|
||||
// ----
|
||||
// fr-u-cu-frf 2 Exact
|
||||
// fr-u-cu-frf 2 High
|
||||
// en-u-co-phonebk 0 No
|
||||
|
||||
// TODO: "he" should be "he-u-rg-IL High"
|
||||
}
|
||||
|
||||
func ExampleMatchStrings() {
|
||||
// languages supported by this service:
|
||||
matcher := language.NewMatcher([]language.Tag{
|
||||
language.English, language.Dutch, language.German,
|
||||
})
|
||||
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
lang, _ := r.Cookie("lang")
|
||||
tag, _ := language.MatchStrings(matcher, lang.String(), r.Header.Get("Accept-Language"))
|
||||
|
||||
fmt.Println("User language:", tag)
|
||||
})
|
||||
}
|
||||
|
||||
func ExampleComprehends() {
|
||||
// Various levels of comprehensibility.
|
||||
fmt.Println(language.Comprehends(language.English, language.English))
|
||||
fmt.Println(language.Comprehends(language.AmericanEnglish, language.BritishEnglish))
|
||||
|
||||
// An explicit Und results in no match.
|
||||
fmt.Println(language.Comprehends(language.English, language.Und))
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// There is usually no mutual comprehensibility between different scripts.
|
||||
fmt.Println(language.Comprehends(language.Make("en-Dsrt"), language.English))
|
||||
|
||||
// One exception is for Traditional versus Simplified Chinese, albeit with
|
||||
// a low confidence.
|
||||
fmt.Println(language.Comprehends(language.TraditionalChinese, language.SimplifiedChinese))
|
||||
|
||||
fmt.Println("----")
|
||||
|
||||
// A Swiss German speaker will often understand High German.
|
||||
fmt.Println(language.Comprehends(language.Make("gsw"), language.Make("de")))
|
||||
|
||||
// The converse is not generally the case.
|
||||
fmt.Println(language.Comprehends(language.Make("de"), language.Make("gsw")))
|
||||
|
||||
// Output:
|
||||
// Exact
|
||||
// High
|
||||
// No
|
||||
// ----
|
||||
// No
|
||||
// Low
|
||||
// ----
|
||||
// High
|
||||
// No
|
||||
}
|
||||
|
||||
func ExampleTag_values() {
|
||||
us := language.MustParseRegion("US")
|
||||
en := language.MustParseBase("en")
|
||||
|
||||
lang, _, region := language.AmericanEnglish.Raw()
|
||||
fmt.Println(lang == en, region == us)
|
||||
|
||||
lang, _, region = language.BritishEnglish.Raw()
|
||||
fmt.Println(lang == en, region == us)
|
||||
|
||||
// Tags can be compared for exact equivalence using '=='.
|
||||
en_us, _ := language.Compose(en, us)
|
||||
fmt.Println(en_us == language.AmericanEnglish)
|
||||
|
||||
// Output:
|
||||
// true true
|
||||
// true false
|
||||
// true
|
||||
}
|
1712
vendor/golang.org/x/text/language/gen.go
generated
vendored
Normal file
1712
vendor/golang.org/x/text/language/gen.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
20
vendor/golang.org/x/text/language/gen_common.go
generated
vendored
Normal file
20
vendor/golang.org/x/text/language/gen_common.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This file contains code common to the maketables.go and the package code.
|
||||
|
||||
// langAliasType is the type of an alias in langAliasMap.
|
||||
type langAliasType int8
|
||||
|
||||
const (
|
||||
langDeprecated langAliasType = iota
|
||||
langMacro
|
||||
langLegacy
|
||||
|
||||
langAliasTypeUnknown langAliasType = -1
|
||||
)
|
162
vendor/golang.org/x/text/language/gen_index.go
generated
vendored
Normal file
162
vendor/golang.org/x/text/language/gen_index.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This file generates derivative tables based on the language package itself.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/language"
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
test = flag.Bool("test", false,
|
||||
"test existing tables; can be used to compare web data with package data.")
|
||||
|
||||
draft = flag.String("draft",
|
||||
"contributed",
|
||||
`Minimal draft requirements (approved, contributed, provisional, unconfirmed).`)
|
||||
)
|
||||
|
||||
func main() {
|
||||
gen.Init()
|
||||
|
||||
// Read the CLDR zip file.
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
defer r.Close()
|
||||
|
||||
d := &cldr.Decoder{}
|
||||
data, err := d.DecodeZip(r)
|
||||
if err != nil {
|
||||
log.Fatalf("DecodeZip: %v", err)
|
||||
}
|
||||
|
||||
w := gen.NewCodeWriter()
|
||||
defer func() {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if _, err = w.WriteGo(buf, "language", ""); err != nil {
|
||||
log.Fatalf("Error formatting file index.go: %v", err)
|
||||
}
|
||||
|
||||
// Since we're generating a table for our own package we need to rewrite
|
||||
// doing the equivalent of go fmt -r 'language.b -> b'. Using
|
||||
// bytes.Replace will do.
|
||||
out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1)
|
||||
if err := ioutil.WriteFile("index.go", out, 0600); err != nil {
|
||||
log.Fatalf("Could not create file index.go: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
m := map[language.Tag]bool{}
|
||||
for _, lang := range data.Locales() {
|
||||
// We include all locales unconditionally to be consistent with en_US.
|
||||
// We want en_US, even though it has no data associated with it.
|
||||
|
||||
// TODO: put any of the languages for which no data exists at the end
|
||||
// of the index. This allows all components based on ICU to use that
|
||||
// as the cutoff point.
|
||||
// if x := data.RawLDML(lang); false ||
|
||||
// x.LocaleDisplayNames != nil ||
|
||||
// x.Characters != nil ||
|
||||
// x.Delimiters != nil ||
|
||||
// x.Measurement != nil ||
|
||||
// x.Dates != nil ||
|
||||
// x.Numbers != nil ||
|
||||
// x.Units != nil ||
|
||||
// x.ListPatterns != nil ||
|
||||
// x.Collations != nil ||
|
||||
// x.Segmentations != nil ||
|
||||
// x.Rbnf != nil ||
|
||||
// x.Annotations != nil ||
|
||||
// x.Metadata != nil {
|
||||
|
||||
// TODO: support POSIX natively, albeit non-standard.
|
||||
tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1))
|
||||
m[tag] = true
|
||||
// }
|
||||
}
|
||||
// Include locales for plural rules, which uses a different structure.
|
||||
for _, plurals := range data.Supplemental().Plurals {
|
||||
for _, rules := range plurals.PluralRules {
|
||||
for _, lang := range strings.Split(rules.Locales, " ") {
|
||||
m[language.Make(lang)] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var core, special []language.Tag
|
||||
|
||||
for t := range m {
|
||||
if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" {
|
||||
log.Fatalf("Unexpected extension %v in %v", x, t)
|
||||
}
|
||||
if len(t.Variants()) == 0 && len(t.Extensions()) == 0 {
|
||||
core = append(core, t)
|
||||
} else {
|
||||
special = append(special, t)
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteComment(`
|
||||
NumCompactTags is the number of common tags. The maximum tag is
|
||||
NumCompactTags-1.`)
|
||||
w.WriteConst("NumCompactTags", len(core)+len(special))
|
||||
|
||||
sort.Sort(byAlpha(special))
|
||||
w.WriteVar("specialTags", special)
|
||||
|
||||
// TODO: order by frequency?
|
||||
sort.Sort(byAlpha(core))
|
||||
|
||||
// Size computations are just an estimate.
|
||||
w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size())
|
||||
w.Size += len(core) * 6 // size of uint32 and uint16
|
||||
|
||||
fmt.Fprintln(w)
|
||||
fmt.Fprintln(w, "var coreTags = map[uint32]uint16{")
|
||||
fmt.Fprintln(w, "0x0: 0, // und")
|
||||
i := len(special) + 1 // Und and special tags already written.
|
||||
for _, t := range core {
|
||||
if t == language.Und {
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w.Hash, t, i)
|
||||
b, s, r := t.Raw()
|
||||
fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n",
|
||||
getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number
|
||||
getIndex(s, 2),
|
||||
getIndex(r, 3),
|
||||
i, t)
|
||||
i++
|
||||
}
|
||||
fmt.Fprintln(w, "}")
|
||||
}
|
||||
|
||||
// getIndex prints the subtag type and extracts its index of size nibble.
|
||||
// If the index is less than n nibbles, the result is prefixed with 0s.
|
||||
func getIndex(x interface{}, n int) string {
|
||||
s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00}
|
||||
s = s[strings.Index(s, "0x")+2 : len(s)-1]
|
||||
return strings.Repeat("0", n-len(s)) + s
|
||||
}
|
||||
|
||||
type byAlpha []language.Tag
|
||||
|
||||
func (a byAlpha) Len() int { return len(a) }
|
||||
func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() }
|
38
vendor/golang.org/x/text/language/go1_1.go
generated
vendored
Normal file
38
vendor/golang.org/x/text/language/go1_1.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.2
|
||||
|
||||
package language
|
||||
|
||||
import "sort"
|
||||
|
||||
func sortStable(s sort.Interface) {
|
||||
ss := stableSort{
|
||||
s: s,
|
||||
pos: make([]int, s.Len()),
|
||||
}
|
||||
for i := range ss.pos {
|
||||
ss.pos[i] = i
|
||||
}
|
||||
sort.Sort(&ss)
|
||||
}
|
||||
|
||||
type stableSort struct {
|
||||
s sort.Interface
|
||||
pos []int
|
||||
}
|
||||
|
||||
func (s *stableSort) Len() int {
|
||||
return len(s.pos)
|
||||
}
|
||||
|
||||
func (s *stableSort) Less(i, j int) bool {
|
||||
return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
|
||||
}
|
||||
|
||||
func (s *stableSort) Swap(i, j int) {
|
||||
s.s.Swap(i, j)
|
||||
s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
|
||||
}
|
11
vendor/golang.org/x/text/language/go1_2.go
generated
vendored
Normal file
11
vendor/golang.org/x/text/language/go1_2.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.2
|
||||
|
||||
package language
|
||||
|
||||
import "sort"
|
||||
|
||||
var sortStable = sort.Stable
|
48
vendor/golang.org/x/text/language/httpexample_test.go
generated
vendored
Normal file
48
vendor/golang.org/x/text/language/httpexample_test.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/language"
|
||||
)
|
||||
|
||||
// matcher is a language.Matcher configured for all supported languages.
|
||||
var matcher = language.NewMatcher([]language.Tag{
|
||||
language.BritishEnglish,
|
||||
language.Norwegian,
|
||||
language.German,
|
||||
})
|
||||
|
||||
// handler is a http.HandlerFunc.
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
t, q, err := language.ParseAcceptLanguage(r.Header.Get("Accept-Language"))
|
||||
// We ignore the error: the default language will be selected for t == nil.
|
||||
tag, _, _ := matcher.Match(t...)
|
||||
fmt.Printf("%5v (t: %6v; q: %3v; err: %v)\n", tag, t, q, err)
|
||||
}
|
||||
|
||||
func ExampleParseAcceptLanguage() {
|
||||
for _, al := range []string{
|
||||
"nn;q=0.3, en-us;q=0.8, en,",
|
||||
"gsw, en;q=0.7, en-US;q=0.8",
|
||||
"gsw, nl, da",
|
||||
"invalid",
|
||||
} {
|
||||
// Create dummy request with Accept-Language set and pass it to handler.
|
||||
r, _ := http.NewRequest("GET", "example.com", strings.NewReader("Hello"))
|
||||
r.Header.Set("Accept-Language", al)
|
||||
handler(nil, r)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// en-GB (t: [ en en-US nn]; q: [ 1 0.8 0.3]; err: <nil>)
|
||||
// en-GB (t: [ gsw en-US en]; q: [ 1 0.8 0.7]; err: <nil>)
|
||||
// de (t: [ gsw nl da]; q: [ 1 1 1]; err: <nil>)
|
||||
// en-GB (t: []; q: []; err: language: tag is not well-formed)
|
||||
}
|
783
vendor/golang.org/x/text/language/index.go
generated
vendored
Normal file
783
vendor/golang.org/x/text/language/index.go
generated
vendored
Normal file
@ -0,0 +1,783 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package language
|
||||
|
||||
// NumCompactTags is the number of common tags. The maximum tag is
|
||||
// NumCompactTags-1.
|
||||
const NumCompactTags = 768
|
||||
|
||||
var specialTags = []Tag{ // 2 elements
|
||||
0: {lang: 0xd7, region: 0x6e, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"},
|
||||
1: {lang: 0x139, region: 0x135, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"},
|
||||
} // Size: 72 bytes
|
||||
|
||||
var coreTags = map[uint32]uint16{
|
||||
0x0: 0, // und
|
||||
0x01600000: 3, // af
|
||||
0x016000d2: 4, // af-NA
|
||||
0x01600161: 5, // af-ZA
|
||||
0x01c00000: 6, // agq
|
||||
0x01c00052: 7, // agq-CM
|
||||
0x02100000: 8, // ak
|
||||
0x02100080: 9, // ak-GH
|
||||
0x02700000: 10, // am
|
||||
0x0270006f: 11, // am-ET
|
||||
0x03a00000: 12, // ar
|
||||
0x03a00001: 13, // ar-001
|
||||
0x03a00023: 14, // ar-AE
|
||||
0x03a00039: 15, // ar-BH
|
||||
0x03a00062: 16, // ar-DJ
|
||||
0x03a00067: 17, // ar-DZ
|
||||
0x03a0006b: 18, // ar-EG
|
||||
0x03a0006c: 19, // ar-EH
|
||||
0x03a0006d: 20, // ar-ER
|
||||
0x03a00097: 21, // ar-IL
|
||||
0x03a0009b: 22, // ar-IQ
|
||||
0x03a000a1: 23, // ar-JO
|
||||
0x03a000a8: 24, // ar-KM
|
||||
0x03a000ac: 25, // ar-KW
|
||||
0x03a000b0: 26, // ar-LB
|
||||
0x03a000b9: 27, // ar-LY
|
||||
0x03a000ba: 28, // ar-MA
|
||||
0x03a000c9: 29, // ar-MR
|
||||
0x03a000e1: 30, // ar-OM
|
||||
0x03a000ed: 31, // ar-PS
|
||||
0x03a000f3: 32, // ar-QA
|
||||
0x03a00108: 33, // ar-SA
|
||||
0x03a0010b: 34, // ar-SD
|
||||
0x03a00115: 35, // ar-SO
|
||||
0x03a00117: 36, // ar-SS
|
||||
0x03a0011c: 37, // ar-SY
|
||||
0x03a00120: 38, // ar-TD
|
||||
0x03a00128: 39, // ar-TN
|
||||
0x03a0015e: 40, // ar-YE
|
||||
0x04000000: 41, // ars
|
||||
0x04300000: 42, // as
|
||||
0x04300099: 43, // as-IN
|
||||
0x04400000: 44, // asa
|
||||
0x0440012f: 45, // asa-TZ
|
||||
0x04800000: 46, // ast
|
||||
0x0480006e: 47, // ast-ES
|
||||
0x05800000: 48, // az
|
||||
0x0581f000: 49, // az-Cyrl
|
||||
0x0581f032: 50, // az-Cyrl-AZ
|
||||
0x05857000: 51, // az-Latn
|
||||
0x05857032: 52, // az-Latn-AZ
|
||||
0x05e00000: 53, // bas
|
||||
0x05e00052: 54, // bas-CM
|
||||
0x07100000: 55, // be
|
||||
0x07100047: 56, // be-BY
|
||||
0x07500000: 57, // bem
|
||||
0x07500162: 58, // bem-ZM
|
||||
0x07900000: 59, // bez
|
||||
0x0790012f: 60, // bez-TZ
|
||||
0x07e00000: 61, // bg
|
||||
0x07e00038: 62, // bg-BG
|
||||
0x08200000: 63, // bh
|
||||
0x0a000000: 64, // bm
|
||||
0x0a0000c3: 65, // bm-ML
|
||||
0x0a500000: 66, // bn
|
||||
0x0a500035: 67, // bn-BD
|
||||
0x0a500099: 68, // bn-IN
|
||||
0x0a900000: 69, // bo
|
||||
0x0a900053: 70, // bo-CN
|
||||
0x0a900099: 71, // bo-IN
|
||||
0x0b200000: 72, // br
|
||||
0x0b200078: 73, // br-FR
|
||||
0x0b500000: 74, // brx
|
||||
0x0b500099: 75, // brx-IN
|
||||
0x0b700000: 76, // bs
|
||||
0x0b71f000: 77, // bs-Cyrl
|
||||
0x0b71f033: 78, // bs-Cyrl-BA
|
||||
0x0b757000: 79, // bs-Latn
|
||||
0x0b757033: 80, // bs-Latn-BA
|
||||
0x0d700000: 81, // ca
|
||||
0x0d700022: 82, // ca-AD
|
||||
0x0d70006e: 83, // ca-ES
|
||||
0x0d700078: 84, // ca-FR
|
||||
0x0d70009e: 85, // ca-IT
|
||||
0x0db00000: 86, // ccp
|
||||
0x0db00035: 87, // ccp-BD
|
||||
0x0db00099: 88, // ccp-IN
|
||||
0x0dc00000: 89, // ce
|
||||
0x0dc00106: 90, // ce-RU
|
||||
0x0df00000: 91, // cgg
|
||||
0x0df00131: 92, // cgg-UG
|
||||
0x0e500000: 93, // chr
|
||||
0x0e500135: 94, // chr-US
|
||||
0x0e900000: 95, // ckb
|
||||
0x0e90009b: 96, // ckb-IQ
|
||||
0x0e90009c: 97, // ckb-IR
|
||||
0x0fa00000: 98, // cs
|
||||
0x0fa0005e: 99, // cs-CZ
|
||||
0x0fe00000: 100, // cu
|
||||
0x0fe00106: 101, // cu-RU
|
||||
0x10000000: 102, // cy
|
||||
0x1000007b: 103, // cy-GB
|
||||
0x10100000: 104, // da
|
||||
0x10100063: 105, // da-DK
|
||||
0x10100082: 106, // da-GL
|
||||
0x10800000: 107, // dav
|
||||
0x108000a4: 108, // dav-KE
|
||||
0x10d00000: 109, // de
|
||||
0x10d0002e: 110, // de-AT
|
||||
0x10d00036: 111, // de-BE
|
||||
0x10d0004e: 112, // de-CH
|
||||
0x10d00060: 113, // de-DE
|
||||
0x10d0009e: 114, // de-IT
|
||||
0x10d000b2: 115, // de-LI
|
||||
0x10d000b7: 116, // de-LU
|
||||
0x11700000: 117, // dje
|
||||
0x117000d4: 118, // dje-NE
|
||||
0x11f00000: 119, // dsb
|
||||
0x11f00060: 120, // dsb-DE
|
||||
0x12400000: 121, // dua
|
||||
0x12400052: 122, // dua-CM
|
||||
0x12800000: 123, // dv
|
||||
0x12b00000: 124, // dyo
|
||||
0x12b00114: 125, // dyo-SN
|
||||
0x12d00000: 126, // dz
|
||||
0x12d00043: 127, // dz-BT
|
||||
0x12f00000: 128, // ebu
|
||||
0x12f000a4: 129, // ebu-KE
|
||||
0x13000000: 130, // ee
|
||||
0x13000080: 131, // ee-GH
|
||||
0x13000122: 132, // ee-TG
|
||||
0x13600000: 133, // el
|
||||
0x1360005d: 134, // el-CY
|
||||
0x13600087: 135, // el-GR
|
||||
0x13900000: 136, // en
|
||||
0x13900001: 137, // en-001
|
||||
0x1390001a: 138, // en-150
|
||||
0x13900025: 139, // en-AG
|
||||
0x13900026: 140, // en-AI
|
||||
0x1390002d: 141, // en-AS
|
||||
0x1390002e: 142, // en-AT
|
||||
0x1390002f: 143, // en-AU
|
||||
0x13900034: 144, // en-BB
|
||||
0x13900036: 145, // en-BE
|
||||
0x1390003a: 146, // en-BI
|
||||
0x1390003d: 147, // en-BM
|
||||
0x13900042: 148, // en-BS
|
||||
0x13900046: 149, // en-BW
|
||||
0x13900048: 150, // en-BZ
|
||||
0x13900049: 151, // en-CA
|
||||
0x1390004a: 152, // en-CC
|
||||
0x1390004e: 153, // en-CH
|
||||
0x13900050: 154, // en-CK
|
||||
0x13900052: 155, // en-CM
|
||||
0x1390005c: 156, // en-CX
|
||||
0x1390005d: 157, // en-CY
|
||||
0x13900060: 158, // en-DE
|
||||
0x13900061: 159, // en-DG
|
||||
0x13900063: 160, // en-DK
|
||||
0x13900064: 161, // en-DM
|
||||
0x1390006d: 162, // en-ER
|
||||
0x13900072: 163, // en-FI
|
||||
0x13900073: 164, // en-FJ
|
||||
0x13900074: 165, // en-FK
|
||||
0x13900075: 166, // en-FM
|
||||
0x1390007b: 167, // en-GB
|
||||
0x1390007c: 168, // en-GD
|
||||
0x1390007f: 169, // en-GG
|
||||
0x13900080: 170, // en-GH
|
||||
0x13900081: 171, // en-GI
|
||||
0x13900083: 172, // en-GM
|
||||
0x1390008a: 173, // en-GU
|
||||
0x1390008c: 174, // en-GY
|
||||
0x1390008d: 175, // en-HK
|
||||
0x13900096: 176, // en-IE
|
||||
0x13900097: 177, // en-IL
|
||||
0x13900098: 178, // en-IM
|
||||
0x13900099: 179, // en-IN
|
||||
0x1390009a: 180, // en-IO
|
||||
0x1390009f: 181, // en-JE
|
||||
0x139000a0: 182, // en-JM
|
||||
0x139000a4: 183, // en-KE
|
||||
0x139000a7: 184, // en-KI
|
||||
0x139000a9: 185, // en-KN
|
||||
0x139000ad: 186, // en-KY
|
||||
0x139000b1: 187, // en-LC
|
||||
0x139000b4: 188, // en-LR
|
||||
0x139000b5: 189, // en-LS
|
||||
0x139000bf: 190, // en-MG
|
||||
0x139000c0: 191, // en-MH
|
||||
0x139000c6: 192, // en-MO
|
||||
0x139000c7: 193, // en-MP
|
||||
0x139000ca: 194, // en-MS
|
||||
0x139000cb: 195, // en-MT
|
||||
0x139000cc: 196, // en-MU
|
||||
0x139000ce: 197, // en-MW
|
||||
0x139000d0: 198, // en-MY
|
||||
0x139000d2: 199, // en-NA
|
||||
0x139000d5: 200, // en-NF
|
||||
0x139000d6: 201, // en-NG
|
||||
0x139000d9: 202, // en-NL
|
||||
0x139000dd: 203, // en-NR
|
||||
0x139000df: 204, // en-NU
|
||||
0x139000e0: 205, // en-NZ
|
||||
0x139000e6: 206, // en-PG
|
||||
0x139000e7: 207, // en-PH
|
||||
0x139000e8: 208, // en-PK
|
||||
0x139000eb: 209, // en-PN
|
||||
0x139000ec: 210, // en-PR
|
||||
0x139000f0: 211, // en-PW
|
||||
0x13900107: 212, // en-RW
|
||||
0x13900109: 213, // en-SB
|
||||
0x1390010a: 214, // en-SC
|
||||
0x1390010b: 215, // en-SD
|
||||
0x1390010c: 216, // en-SE
|
||||
0x1390010d: 217, // en-SG
|
||||
0x1390010e: 218, // en-SH
|
||||
0x1390010f: 219, // en-SI
|
||||
0x13900112: 220, // en-SL
|
||||
0x13900117: 221, // en-SS
|
||||
0x1390011b: 222, // en-SX
|
||||
0x1390011d: 223, // en-SZ
|
||||
0x1390011f: 224, // en-TC
|
||||
0x13900125: 225, // en-TK
|
||||
0x13900129: 226, // en-TO
|
||||
0x1390012c: 227, // en-TT
|
||||
0x1390012d: 228, // en-TV
|
||||
0x1390012f: 229, // en-TZ
|
||||
0x13900131: 230, // en-UG
|
||||
0x13900133: 231, // en-UM
|
||||
0x13900135: 232, // en-US
|
||||
0x13900139: 233, // en-VC
|
||||
0x1390013c: 234, // en-VG
|
||||
0x1390013d: 235, // en-VI
|
||||
0x1390013f: 236, // en-VU
|
||||
0x13900142: 237, // en-WS
|
||||
0x13900161: 238, // en-ZA
|
||||
0x13900162: 239, // en-ZM
|
||||
0x13900164: 240, // en-ZW
|
||||
0x13c00000: 241, // eo
|
||||
0x13c00001: 242, // eo-001
|
||||
0x13e00000: 243, // es
|
||||
0x13e0001f: 244, // es-419
|
||||
0x13e0002c: 245, // es-AR
|
||||
0x13e0003f: 246, // es-BO
|
||||
0x13e00041: 247, // es-BR
|
||||
0x13e00048: 248, // es-BZ
|
||||
0x13e00051: 249, // es-CL
|
||||
0x13e00054: 250, // es-CO
|
||||
0x13e00056: 251, // es-CR
|
||||
0x13e00059: 252, // es-CU
|
||||
0x13e00065: 253, // es-DO
|
||||
0x13e00068: 254, // es-EA
|
||||
0x13e00069: 255, // es-EC
|
||||
0x13e0006e: 256, // es-ES
|
||||
0x13e00086: 257, // es-GQ
|
||||
0x13e00089: 258, // es-GT
|
||||
0x13e0008f: 259, // es-HN
|
||||
0x13e00094: 260, // es-IC
|
||||
0x13e000cf: 261, // es-MX
|
||||
0x13e000d8: 262, // es-NI
|
||||
0x13e000e2: 263, // es-PA
|
||||
0x13e000e4: 264, // es-PE
|
||||
0x13e000e7: 265, // es-PH
|
||||
0x13e000ec: 266, // es-PR
|
||||
0x13e000f1: 267, // es-PY
|
||||
0x13e0011a: 268, // es-SV
|
||||
0x13e00135: 269, // es-US
|
||||
0x13e00136: 270, // es-UY
|
||||
0x13e0013b: 271, // es-VE
|
||||
0x14000000: 272, // et
|
||||
0x1400006a: 273, // et-EE
|
||||
0x14500000: 274, // eu
|
||||
0x1450006e: 275, // eu-ES
|
||||
0x14600000: 276, // ewo
|
||||
0x14600052: 277, // ewo-CM
|
||||
0x14800000: 278, // fa
|
||||
0x14800024: 279, // fa-AF
|
||||
0x1480009c: 280, // fa-IR
|
||||
0x14e00000: 281, // ff
|
||||
0x14e00052: 282, // ff-CM
|
||||
0x14e00084: 283, // ff-GN
|
||||
0x14e000c9: 284, // ff-MR
|
||||
0x14e00114: 285, // ff-SN
|
||||
0x15100000: 286, // fi
|
||||
0x15100072: 287, // fi-FI
|
||||
0x15300000: 288, // fil
|
||||
0x153000e7: 289, // fil-PH
|
||||
0x15800000: 290, // fo
|
||||
0x15800063: 291, // fo-DK
|
||||
0x15800076: 292, // fo-FO
|
||||
0x15e00000: 293, // fr
|
||||
0x15e00036: 294, // fr-BE
|
||||
0x15e00037: 295, // fr-BF
|
||||
0x15e0003a: 296, // fr-BI
|
||||
0x15e0003b: 297, // fr-BJ
|
||||
0x15e0003c: 298, // fr-BL
|
||||
0x15e00049: 299, // fr-CA
|
||||
0x15e0004b: 300, // fr-CD
|
||||
0x15e0004c: 301, // fr-CF
|
||||
0x15e0004d: 302, // fr-CG
|
||||
0x15e0004e: 303, // fr-CH
|
||||
0x15e0004f: 304, // fr-CI
|
||||
0x15e00052: 305, // fr-CM
|
||||
0x15e00062: 306, // fr-DJ
|
||||
0x15e00067: 307, // fr-DZ
|
||||
0x15e00078: 308, // fr-FR
|
||||
0x15e0007a: 309, // fr-GA
|
||||
0x15e0007e: 310, // fr-GF
|
||||
0x15e00084: 311, // fr-GN
|
||||
0x15e00085: 312, // fr-GP
|
||||
0x15e00086: 313, // fr-GQ
|
||||
0x15e00091: 314, // fr-HT
|
||||
0x15e000a8: 315, // fr-KM
|
||||
0x15e000b7: 316, // fr-LU
|
||||
0x15e000ba: 317, // fr-MA
|
||||
0x15e000bb: 318, // fr-MC
|
||||
0x15e000be: 319, // fr-MF
|
||||
0x15e000bf: 320, // fr-MG
|
||||
0x15e000c3: 321, // fr-ML
|
||||
0x15e000c8: 322, // fr-MQ
|
||||
0x15e000c9: 323, // fr-MR
|
||||
0x15e000cc: 324, // fr-MU
|
||||
0x15e000d3: 325, // fr-NC
|
||||
0x15e000d4: 326, // fr-NE
|
||||
0x15e000e5: 327, // fr-PF
|
||||
0x15e000ea: 328, // fr-PM
|
||||
0x15e00102: 329, // fr-RE
|
||||
0x15e00107: 330, // fr-RW
|
||||
0x15e0010a: 331, // fr-SC
|
||||
0x15e00114: 332, // fr-SN
|
||||
0x15e0011c: 333, // fr-SY
|
||||
0x15e00120: 334, // fr-TD
|
||||
0x15e00122: 335, // fr-TG
|
||||
0x15e00128: 336, // fr-TN
|
||||
0x15e0013f: 337, // fr-VU
|
||||
0x15e00140: 338, // fr-WF
|
||||
0x15e0015f: 339, // fr-YT
|
||||
0x16900000: 340, // fur
|
||||
0x1690009e: 341, // fur-IT
|
||||
0x16d00000: 342, // fy
|
||||
0x16d000d9: 343, // fy-NL
|
||||
0x16e00000: 344, // ga
|
||||
0x16e00096: 345, // ga-IE
|
||||
0x17e00000: 346, // gd
|
||||
0x17e0007b: 347, // gd-GB
|
||||
0x19000000: 348, // gl
|
||||
0x1900006e: 349, // gl-ES
|
||||
0x1a300000: 350, // gsw
|
||||
0x1a30004e: 351, // gsw-CH
|
||||
0x1a300078: 352, // gsw-FR
|
||||
0x1a3000b2: 353, // gsw-LI
|
||||
0x1a400000: 354, // gu
|
||||
0x1a400099: 355, // gu-IN
|
||||
0x1a900000: 356, // guw
|
||||
0x1ab00000: 357, // guz
|
||||
0x1ab000a4: 358, // guz-KE
|
||||
0x1ac00000: 359, // gv
|
||||
0x1ac00098: 360, // gv-IM
|
||||
0x1b400000: 361, // ha
|
||||
0x1b400080: 362, // ha-GH
|
||||
0x1b4000d4: 363, // ha-NE
|
||||
0x1b4000d6: 364, // ha-NG
|
||||
0x1b800000: 365, // haw
|
||||
0x1b800135: 366, // haw-US
|
||||
0x1bc00000: 367, // he
|
||||
0x1bc00097: 368, // he-IL
|
||||
0x1be00000: 369, // hi
|
||||
0x1be00099: 370, // hi-IN
|
||||
0x1d100000: 371, // hr
|
||||
0x1d100033: 372, // hr-BA
|
||||
0x1d100090: 373, // hr-HR
|
||||
0x1d200000: 374, // hsb
|
||||
0x1d200060: 375, // hsb-DE
|
||||
0x1d500000: 376, // hu
|
||||
0x1d500092: 377, // hu-HU
|
||||
0x1d700000: 378, // hy
|
||||
0x1d700028: 379, // hy-AM
|
||||
0x1e100000: 380, // id
|
||||
0x1e100095: 381, // id-ID
|
||||
0x1e700000: 382, // ig
|
||||
0x1e7000d6: 383, // ig-NG
|
||||
0x1ea00000: 384, // ii
|
||||
0x1ea00053: 385, // ii-CN
|
||||
0x1f500000: 386, // io
|
||||
0x1f800000: 387, // is
|
||||
0x1f80009d: 388, // is-IS
|
||||
0x1f900000: 389, // it
|
||||
0x1f90004e: 390, // it-CH
|
||||
0x1f90009e: 391, // it-IT
|
||||
0x1f900113: 392, // it-SM
|
||||
0x1f900138: 393, // it-VA
|
||||
0x1fa00000: 394, // iu
|
||||
0x20000000: 395, // ja
|
||||
0x200000a2: 396, // ja-JP
|
||||
0x20300000: 397, // jbo
|
||||
0x20700000: 398, // jgo
|
||||
0x20700052: 399, // jgo-CM
|
||||
0x20a00000: 400, // jmc
|
||||
0x20a0012f: 401, // jmc-TZ
|
||||
0x20e00000: 402, // jv
|
||||
0x21000000: 403, // ka
|
||||
0x2100007d: 404, // ka-GE
|
||||
0x21200000: 405, // kab
|
||||
0x21200067: 406, // kab-DZ
|
||||
0x21600000: 407, // kaj
|
||||
0x21700000: 408, // kam
|
||||
0x217000a4: 409, // kam-KE
|
||||
0x21f00000: 410, // kcg
|
||||
0x22300000: 411, // kde
|
||||
0x2230012f: 412, // kde-TZ
|
||||
0x22700000: 413, // kea
|
||||
0x2270005a: 414, // kea-CV
|
||||
0x23400000: 415, // khq
|
||||
0x234000c3: 416, // khq-ML
|
||||
0x23900000: 417, // ki
|
||||
0x239000a4: 418, // ki-KE
|
||||
0x24200000: 419, // kk
|
||||
0x242000ae: 420, // kk-KZ
|
||||
0x24400000: 421, // kkj
|
||||
0x24400052: 422, // kkj-CM
|
||||
0x24500000: 423, // kl
|
||||
0x24500082: 424, // kl-GL
|
||||
0x24600000: 425, // kln
|
||||
0x246000a4: 426, // kln-KE
|
||||
0x24a00000: 427, // km
|
||||
0x24a000a6: 428, // km-KH
|
||||
0x25100000: 429, // kn
|
||||
0x25100099: 430, // kn-IN
|
||||
0x25400000: 431, // ko
|
||||
0x254000aa: 432, // ko-KP
|
||||
0x254000ab: 433, // ko-KR
|
||||
0x25600000: 434, // kok
|
||||
0x25600099: 435, // kok-IN
|
||||
0x26a00000: 436, // ks
|
||||
0x26a00099: 437, // ks-IN
|
||||
0x26b00000: 438, // ksb
|
||||
0x26b0012f: 439, // ksb-TZ
|
||||
0x26d00000: 440, // ksf
|
||||
0x26d00052: 441, // ksf-CM
|
||||
0x26e00000: 442, // ksh
|
||||
0x26e00060: 443, // ksh-DE
|
||||
0x27400000: 444, // ku
|
||||
0x28100000: 445, // kw
|
||||
0x2810007b: 446, // kw-GB
|
||||
0x28a00000: 447, // ky
|
||||
0x28a000a5: 448, // ky-KG
|
||||
0x29100000: 449, // lag
|
||||
0x2910012f: 450, // lag-TZ
|
||||
0x29500000: 451, // lb
|
||||
0x295000b7: 452, // lb-LU
|
||||
0x2a300000: 453, // lg
|
||||
0x2a300131: 454, // lg-UG
|
||||
0x2af00000: 455, // lkt
|
||||
0x2af00135: 456, // lkt-US
|
||||
0x2b500000: 457, // ln
|
||||
0x2b50002a: 458, // ln-AO
|
||||
0x2b50004b: 459, // ln-CD
|
||||
0x2b50004c: 460, // ln-CF
|
||||
0x2b50004d: 461, // ln-CG
|
||||
0x2b800000: 462, // lo
|
||||
0x2b8000af: 463, // lo-LA
|
||||
0x2bf00000: 464, // lrc
|
||||
0x2bf0009b: 465, // lrc-IQ
|
||||
0x2bf0009c: 466, // lrc-IR
|
||||
0x2c000000: 467, // lt
|
||||
0x2c0000b6: 468, // lt-LT
|
||||
0x2c200000: 469, // lu
|
||||
0x2c20004b: 470, // lu-CD
|
||||
0x2c400000: 471, // luo
|
||||
0x2c4000a4: 472, // luo-KE
|
||||
0x2c500000: 473, // luy
|
||||
0x2c5000a4: 474, // luy-KE
|
||||
0x2c700000: 475, // lv
|
||||
0x2c7000b8: 476, // lv-LV
|
||||
0x2d100000: 477, // mas
|
||||
0x2d1000a4: 478, // mas-KE
|
||||
0x2d10012f: 479, // mas-TZ
|
||||
0x2e900000: 480, // mer
|
||||
0x2e9000a4: 481, // mer-KE
|
||||
0x2ed00000: 482, // mfe
|
||||
0x2ed000cc: 483, // mfe-MU
|
||||
0x2f100000: 484, // mg
|
||||
0x2f1000bf: 485, // mg-MG
|
||||
0x2f200000: 486, // mgh
|
||||
0x2f2000d1: 487, // mgh-MZ
|
||||
0x2f400000: 488, // mgo
|
||||
0x2f400052: 489, // mgo-CM
|
||||
0x2ff00000: 490, // mk
|
||||
0x2ff000c2: 491, // mk-MK
|
||||
0x30400000: 492, // ml
|
||||
0x30400099: 493, // ml-IN
|
||||
0x30b00000: 494, // mn
|
||||
0x30b000c5: 495, // mn-MN
|
||||
0x31b00000: 496, // mr
|
||||
0x31b00099: 497, // mr-IN
|
||||
0x31f00000: 498, // ms
|
||||
0x31f0003e: 499, // ms-BN
|
||||
0x31f000d0: 500, // ms-MY
|
||||
0x31f0010d: 501, // ms-SG
|
||||
0x32000000: 502, // mt
|
||||
0x320000cb: 503, // mt-MT
|
||||
0x32500000: 504, // mua
|
||||
0x32500052: 505, // mua-CM
|
||||
0x33100000: 506, // my
|
||||
0x331000c4: 507, // my-MM
|
||||
0x33a00000: 508, // mzn
|
||||
0x33a0009c: 509, // mzn-IR
|
||||
0x34100000: 510, // nah
|
||||
0x34500000: 511, // naq
|
||||
0x345000d2: 512, // naq-NA
|
||||
0x34700000: 513, // nb
|
||||
0x347000da: 514, // nb-NO
|
||||
0x34700110: 515, // nb-SJ
|
||||
0x34e00000: 516, // nd
|
||||
0x34e00164: 517, // nd-ZW
|
||||
0x35000000: 518, // nds
|
||||
0x35000060: 519, // nds-DE
|
||||
0x350000d9: 520, // nds-NL
|
||||
0x35100000: 521, // ne
|
||||
0x35100099: 522, // ne-IN
|
||||
0x351000db: 523, // ne-NP
|
||||
0x36700000: 524, // nl
|
||||
0x36700030: 525, // nl-AW
|
||||
0x36700036: 526, // nl-BE
|
||||
0x36700040: 527, // nl-BQ
|
||||
0x3670005b: 528, // nl-CW
|
||||
0x367000d9: 529, // nl-NL
|
||||
0x36700116: 530, // nl-SR
|
||||
0x3670011b: 531, // nl-SX
|
||||
0x36800000: 532, // nmg
|
||||
0x36800052: 533, // nmg-CM
|
||||
0x36a00000: 534, // nn
|
||||
0x36a000da: 535, // nn-NO
|
||||
0x36c00000: 536, // nnh
|
||||
0x36c00052: 537, // nnh-CM
|
||||
0x36f00000: 538, // no
|
||||
0x37500000: 539, // nqo
|
||||
0x37600000: 540, // nr
|
||||
0x37a00000: 541, // nso
|
||||
0x38000000: 542, // nus
|
||||
0x38000117: 543, // nus-SS
|
||||
0x38700000: 544, // ny
|
||||
0x38900000: 545, // nyn
|
||||
0x38900131: 546, // nyn-UG
|
||||
0x39000000: 547, // om
|
||||
0x3900006f: 548, // om-ET
|
||||
0x390000a4: 549, // om-KE
|
||||
0x39500000: 550, // or
|
||||
0x39500099: 551, // or-IN
|
||||
0x39800000: 552, // os
|
||||
0x3980007d: 553, // os-GE
|
||||
0x39800106: 554, // os-RU
|
||||
0x39d00000: 555, // pa
|
||||
0x39d05000: 556, // pa-Arab
|
||||
0x39d050e8: 557, // pa-Arab-PK
|
||||
0x39d33000: 558, // pa-Guru
|
||||
0x39d33099: 559, // pa-Guru-IN
|
||||
0x3a100000: 560, // pap
|
||||
0x3b300000: 561, // pl
|
||||
0x3b3000e9: 562, // pl-PL
|
||||
0x3bd00000: 563, // prg
|
||||
0x3bd00001: 564, // prg-001
|
||||
0x3be00000: 565, // ps
|
||||
0x3be00024: 566, // ps-AF
|
||||
0x3c000000: 567, // pt
|
||||
0x3c00002a: 568, // pt-AO
|
||||
0x3c000041: 569, // pt-BR
|
||||
0x3c00004e: 570, // pt-CH
|
||||
0x3c00005a: 571, // pt-CV
|
||||
0x3c000086: 572, // pt-GQ
|
||||
0x3c00008b: 573, // pt-GW
|
||||
0x3c0000b7: 574, // pt-LU
|
||||
0x3c0000c6: 575, // pt-MO
|
||||
0x3c0000d1: 576, // pt-MZ
|
||||
0x3c0000ee: 577, // pt-PT
|
||||
0x3c000118: 578, // pt-ST
|
||||
0x3c000126: 579, // pt-TL
|
||||
0x3c400000: 580, // qu
|
||||
0x3c40003f: 581, // qu-BO
|
||||
0x3c400069: 582, // qu-EC
|
||||
0x3c4000e4: 583, // qu-PE
|
||||
0x3d400000: 584, // rm
|
||||
0x3d40004e: 585, // rm-CH
|
||||
0x3d900000: 586, // rn
|
||||
0x3d90003a: 587, // rn-BI
|
||||
0x3dc00000: 588, // ro
|
||||
0x3dc000bc: 589, // ro-MD
|
||||
0x3dc00104: 590, // ro-RO
|
||||
0x3de00000: 591, // rof
|
||||
0x3de0012f: 592, // rof-TZ
|
||||
0x3e200000: 593, // ru
|
||||
0x3e200047: 594, // ru-BY
|
||||
0x3e2000a5: 595, // ru-KG
|
||||
0x3e2000ae: 596, // ru-KZ
|
||||
0x3e2000bc: 597, // ru-MD
|
||||
0x3e200106: 598, // ru-RU
|
||||
0x3e200130: 599, // ru-UA
|
||||
0x3e500000: 600, // rw
|
||||
0x3e500107: 601, // rw-RW
|
||||
0x3e600000: 602, // rwk
|
||||
0x3e60012f: 603, // rwk-TZ
|
||||
0x3eb00000: 604, // sah
|
||||
0x3eb00106: 605, // sah-RU
|
||||
0x3ec00000: 606, // saq
|
||||
0x3ec000a4: 607, // saq-KE
|
||||
0x3f300000: 608, // sbp
|
||||
0x3f30012f: 609, // sbp-TZ
|
||||
0x3fa00000: 610, // sd
|
||||
0x3fa000e8: 611, // sd-PK
|
||||
0x3fc00000: 612, // sdh
|
||||
0x3fd00000: 613, // se
|
||||
0x3fd00072: 614, // se-FI
|
||||
0x3fd000da: 615, // se-NO
|
||||
0x3fd0010c: 616, // se-SE
|
||||
0x3ff00000: 617, // seh
|
||||
0x3ff000d1: 618, // seh-MZ
|
||||
0x40100000: 619, // ses
|
||||
0x401000c3: 620, // ses-ML
|
||||
0x40200000: 621, // sg
|
||||
0x4020004c: 622, // sg-CF
|
||||
0x40800000: 623, // shi
|
||||
0x40857000: 624, // shi-Latn
|
||||
0x408570ba: 625, // shi-Latn-MA
|
||||
0x408dc000: 626, // shi-Tfng
|
||||
0x408dc0ba: 627, // shi-Tfng-MA
|
||||
0x40c00000: 628, // si
|
||||
0x40c000b3: 629, // si-LK
|
||||
0x41200000: 630, // sk
|
||||
0x41200111: 631, // sk-SK
|
||||
0x41600000: 632, // sl
|
||||
0x4160010f: 633, // sl-SI
|
||||
0x41c00000: 634, // sma
|
||||
0x41d00000: 635, // smi
|
||||
0x41e00000: 636, // smj
|
||||
0x41f00000: 637, // smn
|
||||
0x41f00072: 638, // smn-FI
|
||||
0x42200000: 639, // sms
|
||||
0x42300000: 640, // sn
|
||||
0x42300164: 641, // sn-ZW
|
||||
0x42900000: 642, // so
|
||||
0x42900062: 643, // so-DJ
|
||||
0x4290006f: 644, // so-ET
|
||||
0x429000a4: 645, // so-KE
|
||||
0x42900115: 646, // so-SO
|
||||
0x43100000: 647, // sq
|
||||
0x43100027: 648, // sq-AL
|
||||
0x431000c2: 649, // sq-MK
|
||||
0x4310014d: 650, // sq-XK
|
||||
0x43200000: 651, // sr
|
||||
0x4321f000: 652, // sr-Cyrl
|
||||
0x4321f033: 653, // sr-Cyrl-BA
|
||||
0x4321f0bd: 654, // sr-Cyrl-ME
|
||||
0x4321f105: 655, // sr-Cyrl-RS
|
||||
0x4321f14d: 656, // sr-Cyrl-XK
|
||||
0x43257000: 657, // sr-Latn
|
||||
0x43257033: 658, // sr-Latn-BA
|
||||
0x432570bd: 659, // sr-Latn-ME
|
||||
0x43257105: 660, // sr-Latn-RS
|
||||
0x4325714d: 661, // sr-Latn-XK
|
||||
0x43700000: 662, // ss
|
||||
0x43a00000: 663, // ssy
|
||||
0x43b00000: 664, // st
|
||||
0x44400000: 665, // sv
|
||||
0x44400031: 666, // sv-AX
|
||||
0x44400072: 667, // sv-FI
|
||||
0x4440010c: 668, // sv-SE
|
||||
0x44500000: 669, // sw
|
||||
0x4450004b: 670, // sw-CD
|
||||
0x445000a4: 671, // sw-KE
|
||||
0x4450012f: 672, // sw-TZ
|
||||
0x44500131: 673, // sw-UG
|
||||
0x44e00000: 674, // syr
|
||||
0x45000000: 675, // ta
|
||||
0x45000099: 676, // ta-IN
|
||||
0x450000b3: 677, // ta-LK
|
||||
0x450000d0: 678, // ta-MY
|
||||
0x4500010d: 679, // ta-SG
|
||||
0x46100000: 680, // te
|
||||
0x46100099: 681, // te-IN
|
||||
0x46400000: 682, // teo
|
||||
0x464000a4: 683, // teo-KE
|
||||
0x46400131: 684, // teo-UG
|
||||
0x46700000: 685, // tg
|
||||
0x46700124: 686, // tg-TJ
|
||||
0x46b00000: 687, // th
|
||||
0x46b00123: 688, // th-TH
|
||||
0x46f00000: 689, // ti
|
||||
0x46f0006d: 690, // ti-ER
|
||||
0x46f0006f: 691, // ti-ET
|
||||
0x47100000: 692, // tig
|
||||
0x47600000: 693, // tk
|
||||
0x47600127: 694, // tk-TM
|
||||
0x48000000: 695, // tn
|
||||
0x48200000: 696, // to
|
||||
0x48200129: 697, // to-TO
|
||||
0x48a00000: 698, // tr
|
||||
0x48a0005d: 699, // tr-CY
|
||||
0x48a0012b: 700, // tr-TR
|
||||
0x48e00000: 701, // ts
|
||||
0x49400000: 702, // tt
|
||||
0x49400106: 703, // tt-RU
|
||||
0x4a400000: 704, // twq
|
||||
0x4a4000d4: 705, // twq-NE
|
||||
0x4a900000: 706, // tzm
|
||||
0x4a9000ba: 707, // tzm-MA
|
||||
0x4ac00000: 708, // ug
|
||||
0x4ac00053: 709, // ug-CN
|
||||
0x4ae00000: 710, // uk
|
||||
0x4ae00130: 711, // uk-UA
|
||||
0x4b400000: 712, // ur
|
||||
0x4b400099: 713, // ur-IN
|
||||
0x4b4000e8: 714, // ur-PK
|
||||
0x4bc00000: 715, // uz
|
||||
0x4bc05000: 716, // uz-Arab
|
||||
0x4bc05024: 717, // uz-Arab-AF
|
||||
0x4bc1f000: 718, // uz-Cyrl
|
||||
0x4bc1f137: 719, // uz-Cyrl-UZ
|
||||
0x4bc57000: 720, // uz-Latn
|
||||
0x4bc57137: 721, // uz-Latn-UZ
|
||||
0x4be00000: 722, // vai
|
||||
0x4be57000: 723, // vai-Latn
|
||||
0x4be570b4: 724, // vai-Latn-LR
|
||||
0x4bee3000: 725, // vai-Vaii
|
||||
0x4bee30b4: 726, // vai-Vaii-LR
|
||||
0x4c000000: 727, // ve
|
||||
0x4c300000: 728, // vi
|
||||
0x4c30013e: 729, // vi-VN
|
||||
0x4c900000: 730, // vo
|
||||
0x4c900001: 731, // vo-001
|
||||
0x4cc00000: 732, // vun
|
||||
0x4cc0012f: 733, // vun-TZ
|
||||
0x4ce00000: 734, // wa
|
||||
0x4cf00000: 735, // wae
|
||||
0x4cf0004e: 736, // wae-CH
|
||||
0x4e500000: 737, // wo
|
||||
0x4e500114: 738, // wo-SN
|
||||
0x4f200000: 739, // xh
|
||||
0x4fb00000: 740, // xog
|
||||
0x4fb00131: 741, // xog-UG
|
||||
0x50900000: 742, // yav
|
||||
0x50900052: 743, // yav-CM
|
||||
0x51200000: 744, // yi
|
||||
0x51200001: 745, // yi-001
|
||||
0x51800000: 746, // yo
|
||||
0x5180003b: 747, // yo-BJ
|
||||
0x518000d6: 748, // yo-NG
|
||||
0x51f00000: 749, // yue
|
||||
0x51f38000: 750, // yue-Hans
|
||||
0x51f38053: 751, // yue-Hans-CN
|
||||
0x51f39000: 752, // yue-Hant
|
||||
0x51f3908d: 753, // yue-Hant-HK
|
||||
0x52800000: 754, // zgh
|
||||
0x528000ba: 755, // zgh-MA
|
||||
0x52900000: 756, // zh
|
||||
0x52938000: 757, // zh-Hans
|
||||
0x52938053: 758, // zh-Hans-CN
|
||||
0x5293808d: 759, // zh-Hans-HK
|
||||
0x529380c6: 760, // zh-Hans-MO
|
||||
0x5293810d: 761, // zh-Hans-SG
|
||||
0x52939000: 762, // zh-Hant
|
||||
0x5293908d: 763, // zh-Hant-HK
|
||||
0x529390c6: 764, // zh-Hant-MO
|
||||
0x5293912e: 765, // zh-Hant-TW
|
||||
0x52f00000: 766, // zu
|
||||
0x52f00161: 767, // zu-ZA
|
||||
}
|
||||
|
||||
// Total table size 4676 bytes (4KiB); checksum: 17BE3673
|
907
vendor/golang.org/x/text/language/language.go
generated
vendored
Normal file
907
vendor/golang.org/x/text/language/language.go
generated
vendored
Normal file
@ -0,0 +1,907 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run gen.go gen_common.go -output tables.go
|
||||
//go:generate go run gen_index.go
|
||||
|
||||
package language
|
||||
|
||||
// TODO: Remove above NOTE after:
|
||||
// - verifying that tables are dropped correctly (most notably matcher tables).
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Buffer-sizing constants used when serializing tags to strings.
const (
	// maxCoreSize is the maximum size of a BCP 47 tag without variants and
	// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
	maxCoreSize = 12

	// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
	// is large enough to hold at least 99% of the BCP 47 tags.
	max99thPercentileSize = 32

	// maxSimpleUExtensionSize is the maximum size of a -u extension with one
	// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
	maxSimpleUExtensionSize = 14
)
|
||||
|
||||
// Tag represents a BCP 47 language tag. It is used to specify an instance of a
// specific language or locale. All language tag values are guaranteed to be
// well-formed.
type Tag struct {
	lang   langID   // base language subtag (compact index)
	region regionID // region subtag (compact index); 0 if unspecified
	// TODO: we will soon run out of positions for script. Idea: instead of
	// storing lang, region, and script codes, store only the compact index and
	// have a lookup table from this code to its expansion. This greatly speeds
	// up table lookup, speed up common variant cases.
	// This will also immediately free up 3 extra bytes. Also, the pVariant
	// field can now be moved to the lookup table, as the compact index uniquely
	// determines the offset of a possible variant.
	script   scriptID // script subtag; 0 if unspecified
	pVariant byte     // offset in str, includes preceding '-'
	pExt     uint16   // offset of first extension, includes preceding '-'

	// str is the string representation of the Tag. It will only be used if the
	// tag has variants or extensions.
	str string
}
|
||||
|
||||
// Make is a convenience wrapper for Parse that omits the error.
|
||||
// In case of an error, a sensible default is returned.
|
||||
func Make(s string) Tag {
|
||||
return Default.Make(s)
|
||||
}
|
||||
|
||||
// Make is a convenience wrapper for c.Parse that omits the error.
|
||||
// In case of an error, a sensible default is returned.
|
||||
func (c CanonType) Make(s string) Tag {
|
||||
t, _ := c.Parse(s)
|
||||
return t
|
||||
}
|
||||
|
||||
// Raw returns the raw base language, script and region, without making an
|
||||
// attempt to infer their values.
|
||||
func (t Tag) Raw() (b Base, s Script, r Region) {
|
||||
return Base{t.lang}, Script{t.script}, Region{t.region}
|
||||
}
|
||||
|
||||
// equalTags compares language, script and region subtags only.
|
||||
func (t Tag) equalTags(a Tag) bool {
|
||||
return t.lang == a.lang && t.script == a.script && t.region == a.region
|
||||
}
|
||||
|
||||
// IsRoot returns true if t is equal to language "und".
|
||||
func (t Tag) IsRoot() bool {
|
||||
if int(t.pVariant) < len(t.str) {
|
||||
return false
|
||||
}
|
||||
return t.equalTags(und)
|
||||
}
|
||||
|
||||
// private reports whether the Tag consists solely of a private use tag.
|
||||
func (t Tag) private() bool {
|
||||
return t.str != "" && t.pVariant == 0
|
||||
}
|
||||
|
||||
// CanonType can be used to enable or disable various types of canonicalization.
type CanonType int

const (
	// Replace deprecated base languages with their preferred replacements.
	DeprecatedBase CanonType = 1 << iota
	// Replace deprecated scripts with their preferred replacements.
	DeprecatedScript
	// Replace deprecated regions with their preferred replacements.
	DeprecatedRegion
	// Remove redundant scripts.
	SuppressScript
	// Normalize legacy encodings. This includes legacy languages defined in
	// CLDR as well as bibliographic codes defined in ISO-639.
	Legacy
	// Map the dominant language of a macro language group to the macro language
	// subtag. For example cmn -> zh.
	Macro
	// The CLDR flag should be used if full compatibility with CLDR is required.
	// There are a few cases where language.Tag may differ from CLDR. To follow all
	// of CLDR's suggestions, use All|CLDR.
	CLDR

	// Raw can be used to Compose or Parse without Canonicalization.
	Raw CanonType = 0

	// Replace all deprecated tags with their preferred replacements.
	Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion

	// All canonicalizations recommended by BCP 47.
	BCP47 = Deprecated | SuppressScript

	// All canonicalizations.
	All = BCP47 | Legacy | Macro

	// Default is the canonicalization used by Parse, Make and Compose. To
	// preserve as much information as possible, canonicalizations that remove
	// potentially valuable information are not included. The Matcher is
	// designed to recognize similar tags that would be the same if
	// they were canonicalized using All.
	Default = Deprecated | Legacy

	// canonLang is the subset of canonicalizations that can affect the base
	// language subtag; used internally by canonicalize.
	canonLang = DeprecatedBase | Legacy | Macro

	// TODO: LikelyScript, LikelyRegion: suppress similar to ICU.
)
|
||||
|
||||
// canonicalize returns the canonicalized equivalent of the tag and
// whether there was any change. Only the canonicalizations selected by c
// are applied; the cached string representation (t.str) is NOT updated
// here — callers such as Canonicalize call remakeString afterwards.
func (t Tag) canonicalize(c CanonType) (Tag, bool) {
	if c == Raw {
		return t, false
	}
	changed := false
	if c&SuppressScript != 0 {
		// Drop the script if it is the default (suppress) script for the
		// base language. Only languages below langNoIndexOffset have an
		// entry in the suppressScript table.
		if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] {
			t.script = 0
			changed = true
		}
	}
	if c&canonLang != 0 {
		// The loop exists solely so the langDeprecated case can re-run alias
		// mapping (via continue): after replacing a deprecated base language,
		// other alias types may apply to the new code. All other paths break.
		for {
			if l, aliasType := normLang(t.lang); l != t.lang {
				switch aliasType {
				case langLegacy:
					if c&Legacy != 0 {
						// Special case: legacy "sh" implies Latin script
						// when no script was given.
						if t.lang == _sh && t.script == 0 {
							t.script = _Latn
						}
						t.lang = l
						changed = true
					}
				case langMacro:
					if c&Macro != 0 {
						// We deviate here from CLDR. The mapping "nb" -> "no"
						// qualifies as a typical Macro language mapping. However,
						// for legacy reasons, CLDR maps "no", the macro language
						// code for Norwegian, to the dominant variant "nb". This
						// change is currently under consideration for CLDR as well.
						// See http://unicode.org/cldr/trac/ticket/2698 and also
						// http://unicode.org/cldr/trac/ticket/1790 for some of the
						// practical implications. TODO: this check could be removed
						// if CLDR adopts this change.
						if c&CLDR == 0 || t.lang != _nb {
							changed = true
							t.lang = l
						}
					}
				case langDeprecated:
					if c&DeprecatedBase != 0 {
						// Special case: deprecated "mo" implies region MD
						// when no region was given.
						if t.lang == _mo && t.region == 0 {
							t.region = _MD
						}
						t.lang = l
						changed = true
						// Other canonicalization types may still apply.
						continue
					}
				}
			} else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 {
				// In CLDR-compatibility mode, map macro "no" to the
				// dominant variant "nb" (see the comment above).
				t.lang = _nb
				changed = true
			}
			break
		}
	}
	if c&DeprecatedScript != 0 {
		// Qaai is the only deprecated script; its replacement is Zinh.
		if t.script == _Qaai {
			changed = true
			t.script = _Zinh
		}
	}
	if c&DeprecatedRegion != 0 {
		// normRegion returns 0 when the region has no replacement.
		if r := normRegion(t.region); r != 0 {
			changed = true
			t.region = r
		}
	}
	return t, changed
}
|
||||
|
||||
// Canonicalize returns the canonicalized equivalent of the tag.
|
||||
func (c CanonType) Canonicalize(t Tag) (Tag, error) {
|
||||
t, changed := t.canonicalize(c)
|
||||
if changed {
|
||||
t.remakeString()
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Confidence indicates the level of certainty for a given return value.
// For example, Serbian may be written in Cyrillic or Latin script.
// The confidence level indicates whether a value was explicitly specified,
// whether it is typically the only possible value, or whether there is
// an ambiguity.
type Confidence int

const (
	No    Confidence = iota // full confidence that there was no match
	Low                     // most likely value picked out of a set of alternatives
	High                    // value is generally assumed to be the correct match
	Exact                   // exact match or explicitly specified value
)

// confName holds the display names of the Confidence levels, indexed by value.
var confName = []string{"No", "Low", "High", "Exact"}

// String returns the name of the confidence level.
func (c Confidence) String() string {
	return confName[c]
}
|
||||
|
||||
// remakeString is used to update t.str in case lang, script or region changed.
// It is assumed that pExt and pVariant still point to the start of the
// respective parts.
func (t *Tag) remakeString() {
	if t.str == "" {
		// No cached string means no variants/extensions; nothing to rebuild.
		return
	}
	// extra holds the variant+extension suffix, without its leading '-'.
	extra := t.str[t.pVariant:]
	if t.pVariant > 0 {
		extra = extra[1:]
	}
	if t.equalTags(und) && strings.HasPrefix(extra, "x-") {
		// A pure private-use tag: the string IS the suffix and the
		// variant/extension offsets collapse to zero.
		t.str = extra
		t.pVariant = 0
		t.pExt = 0
		return
	}
	var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases.
	b := buf[:t.genCoreBytes(buf[:])]
	if extra != "" {
		// The core part may have grown or shrunk; shift pVariant/pExt by
		// the difference so they keep pointing at the same suffix parts.
		diff := len(b) - int(t.pVariant)
		b = append(b, '-')
		b = append(b, extra...)
		t.pVariant = uint8(int(t.pVariant) + diff)
		t.pExt = uint16(int(t.pExt) + diff)
	} else {
		// No suffix: both offsets point at the end of the string.
		t.pVariant = uint8(len(b))
		t.pExt = uint16(len(b))
	}
	t.str = string(b)
}
|
||||
|
||||
// genCoreBytes writes a string for the base languages, script and region tags
|
||||
// to the given buffer and returns the number of bytes written. It will never
|
||||
// write more than maxCoreSize bytes.
|
||||
func (t *Tag) genCoreBytes(buf []byte) int {
|
||||
n := t.lang.stringToBuf(buf[:])
|
||||
if t.script != 0 {
|
||||
n += copy(buf[n:], "-")
|
||||
n += copy(buf[n:], t.script.String())
|
||||
}
|
||||
if t.region != 0 {
|
||||
n += copy(buf[n:], "-")
|
||||
n += copy(buf[n:], t.region.String())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// String returns the canonical string representation of the language tag.
|
||||
func (t Tag) String() string {
|
||||
if t.str != "" {
|
||||
return t.str
|
||||
}
|
||||
if t.script == 0 && t.region == 0 {
|
||||
return t.lang.String()
|
||||
}
|
||||
buf := [maxCoreSize]byte{}
|
||||
return string(buf[:t.genCoreBytes(buf[:])])
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (t Tag) MarshalText() (text []byte, err error) {
|
||||
if t.str != "" {
|
||||
text = append(text, t.str...)
|
||||
} else if t.script == 0 && t.region == 0 {
|
||||
text = append(text, t.lang.String()...)
|
||||
} else {
|
||||
buf := [maxCoreSize]byte{}
|
||||
text = buf[:t.genCoreBytes(buf[:])]
|
||||
}
|
||||
return text, nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (t *Tag) UnmarshalText(text []byte) error {
|
||||
tag, err := Raw.Parse(string(text))
|
||||
*t = tag
|
||||
return err
|
||||
}
|
||||
|
||||
// Base returns the base language of the language tag. If the base language is
|
||||
// unspecified, an attempt will be made to infer it from the context.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Base() (Base, Confidence) {
|
||||
if t.lang != 0 {
|
||||
return Base{t.lang}, Exact
|
||||
}
|
||||
c := High
|
||||
if t.script == 0 && !(Region{t.region}).IsCountry() {
|
||||
c = Low
|
||||
}
|
||||
if tag, err := addTags(t); err == nil && tag.lang != 0 {
|
||||
return Base{tag.lang}, c
|
||||
}
|
||||
return Base{0}, No
|
||||
}
|
||||
|
||||
// Script infers the script for the language tag. If it was not explicitly given, it will infer
|
||||
// a most likely candidate.
|
||||
// If more than one script is commonly used for a language, the most likely one
|
||||
// is returned with a low confidence indication. For example, it returns (Cyrl, Low)
|
||||
// for Serbian.
|
||||
// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined)
|
||||
// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks
|
||||
// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts.
|
||||
// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for
|
||||
// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified.
|
||||
// Note that an inferred script is never guaranteed to be the correct one. Latin is
|
||||
// almost exclusively used for Afrikaans, but Arabic has been used for some texts
|
||||
// in the past. Also, the script that is commonly used may change over time.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Script() (Script, Confidence) {
|
||||
if t.script != 0 {
|
||||
return Script{t.script}, Exact
|
||||
}
|
||||
sc, c := scriptID(_Zzzz), No
|
||||
if t.lang < langNoIndexOffset {
|
||||
if scr := scriptID(suppressScript[t.lang]); scr != 0 {
|
||||
// Note: it is not always the case that a language with a suppress
|
||||
// script value is only written in one script (e.g. kk, ms, pa).
|
||||
if t.region == 0 {
|
||||
return Script{scriptID(scr)}, High
|
||||
}
|
||||
sc, c = scr, High
|
||||
}
|
||||
}
|
||||
if tag, err := addTags(t); err == nil {
|
||||
if tag.script != sc {
|
||||
sc, c = tag.script, Low
|
||||
}
|
||||
} else {
|
||||
t, _ = (Deprecated | Macro).Canonicalize(t)
|
||||
if tag, err := addTags(t); err == nil && tag.script != sc {
|
||||
sc, c = tag.script, Low
|
||||
}
|
||||
}
|
||||
return Script{sc}, c
|
||||
}
|
||||
|
||||
// Region returns the region for the language tag. If it was not explicitly given, it will
|
||||
// infer a most likely candidate from the context.
|
||||
// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change.
|
||||
func (t Tag) Region() (Region, Confidence) {
|
||||
if t.region != 0 {
|
||||
return Region{t.region}, Exact
|
||||
}
|
||||
if t, err := addTags(t); err == nil {
|
||||
return Region{t.region}, Low // TODO: differentiate between high and low.
|
||||
}
|
||||
t, _ = (Deprecated | Macro).Canonicalize(t)
|
||||
if tag, err := addTags(t); err == nil {
|
||||
return Region{tag.region}, Low
|
||||
}
|
||||
return Region{_ZZ}, No // TODO: return world instead of undetermined?
|
||||
}
|
||||
|
||||
// Variant returns the variants specified explicitly for this language tag.
|
||||
// or nil if no variant was specified.
|
||||
func (t Tag) Variants() []Variant {
|
||||
v := []Variant{}
|
||||
if int(t.pVariant) < int(t.pExt) {
|
||||
for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; {
|
||||
x, str = nextToken(str)
|
||||
v = append(v, Variant{x})
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
// specific language are substituted with fields from the parent language.
// The parent for a language may change for newer versions of CLDR.
func (t Tag) Parent() Tag {
	if t.str != "" {
		// Strip the variants and extensions.
		t, _ = Raw.Compose(t.Raw())
		if t.region == 0 && t.script != 0 && t.lang != 0 {
			// If the script is the default one for the base language,
			// dropping it yields the parent directly.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script == t.script {
				return Tag{lang: t.lang}
			}
		}
		return t
	}
	if t.lang != 0 {
		if t.region != 0 {
			// Determine the likely (maximized) script so we can match
			// against the parents table, which is keyed on it.
			maxScript := t.script
			if maxScript == 0 {
				max, _ := addTags(t)
				maxScript = max.script
			}

			// The parents table maps (lang, maxScript, fromRegion) to an
			// explicit parent tag; scan for an exact match.
			for i := range parents {
				if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript {
					for _, r := range parents[i].fromRegion {
						if regionID(r) == t.region {
							return Tag{
								lang:   t.lang,
								script: scriptID(parents[i].script),
								region: regionID(parents[i].toRegion),
							}
						}
					}
				}
			}

			// Strip the script if it is the default one.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != maxScript {
				return Tag{lang: t.lang, script: maxScript}
			}
			return Tag{lang: t.lang}
		} else if t.script != 0 {
			// The parent for an base-script pair with a non-default script is
			// "und" instead of the base language.
			base, _ := addTags(Tag{lang: t.lang})
			if base.script != t.script {
				return und
			}
			return Tag{lang: t.lang}
		}
	}
	return und
}
|
||||
|
||||
// nextToken splits off the first hyphen-separated token of s, which must
// begin with '-'. It returns the token and the remainder of the string
// (starting at the next '-'), or the token and "" when nothing follows.
func nextToken(s string) (t, tail string) {
	rest := s[1:] // skip the leading '-'
	if i := strings.Index(rest, "-"); i >= 0 {
		return rest[:i], rest[i:]
	}
	return rest, ""
}
|
||||
|
||||
// Extension is a single BCP 47 extension.
type Extension struct {
	// s is the raw extension string, including the one-byte extension type
	// prefix (e.g. "u-co-phonebk"); empty for the zero Extension.
	s string
}
|
||||
|
||||
// String returns the string representation of the extension, including the
// type tag.
func (e Extension) String() string {
	return e.s
}
|
||||
|
||||
// ParseExtension parses s as an extension and returns it on success.
func ParseExtension(s string) (e Extension, err error) {
	scan := makeScannerString(s)
	var end int
	// The first token must be the single-character extension type.
	if n := len(scan.token); n != 1 {
		return Extension{}, errSyntax
	}
	scan.toLower(0, len(scan.b))
	end = parseExtension(&scan)
	// Anything left over after parsing means s is not a single extension.
	if end != len(s) {
		return Extension{}, errSyntax
	}
	return Extension{string(scan.b)}, nil
}
|
||||
|
||||
// Type returns the one-byte extension type of e. It returns 0 for the zero
// Extension.
func (e Extension) Type() byte {
	if e.s == "" {
		return 0
	}
	return e.s[0]
}
|
||||
|
||||
// Tokens returns the list of tokens of e, with the extension type as the
// first element.
func (e Extension) Tokens() []string {
	return strings.Split(e.s, "-")
}
|
||||
|
||||
// Extension returns the extension of type x for tag t. It will return
// false for ok if t does not have the requested extension. The returned
// extension will be invalid in this case.
func (t Tag) Extension(x byte) (ext Extension, ok bool) {
	// Extensions start at t.pExt within the tag string; getExtension
	// advances i past each one.
	for i := int(t.pExt); i < len(t.str)-1; {
		var ext string
		i, ext = getExtension(t.str, i)
		if ext[0] == x {
			return Extension{ext}, true
		}
	}
	return Extension{}, false
}
|
||||
|
||||
// Extensions returns all extensions of t.
func (t Tag) Extensions() []Extension {
	e := []Extension{}
	// Walk the extension area of the tag string, one extension at a time.
	for i := int(t.pExt); i < len(t.str)-1; {
		var ext string
		i, ext = getExtension(t.str, i)
		e = append(e, Extension{ext})
	}
	return e
}
|
||||
|
||||
// TypeForKey returns the type associated with the given key, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// TypeForKey will traverse the inheritance chain to get the correct value.
func (t Tag) TypeForKey(key string) string {
	// Equal start and end offsets mean the key is absent.
	if start, end, _ := t.findTypeForKey(key); end != start {
		return t.str[start:end]
	}
	return ""
}
|
||||
|
||||
var (
	// errPrivateUse is returned by SetTypeForKey for private use tags,
	// which cannot carry a 'u' extension.
	errPrivateUse = errors.New("cannot set a key on a private use tag")
	// errInvalidArguments is returned when key or type has an invalid length.
	errInvalidArguments = errors.New("invalid key or type")
)
|
||||
|
||||
// SetTypeForKey returns a new Tag with the key set to type, where key and type
// are of the allowed values defined for the Unicode locale extension ('u') in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// An empty value removes an existing pair with the same key.
func (t Tag) SetTypeForKey(key, value string) (Tag, error) {
	if t.private() {
		return t, errPrivateUse
	}
	// Unicode extension keys are always exactly two characters.
	if len(key) != 2 {
		return t, errInvalidArguments
	}

	// Remove the setting if value is "".
	if value == "" {
		start, end, _ := t.findTypeForKey(key)
		if start != end {
			// Remove key tag and leading '-'.
			start -= 4

			// Remove a possible empty extension.
			if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' {
				start -= 2
			}
			// If nothing of the suffix remains, reset to a core-only tag.
			if start == int(t.pVariant) && end == len(t.str) {
				t.str = ""
				t.pVariant, t.pExt = 0, 0
			} else {
				t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:])
			}
		}
		return t, nil
	}

	// Types must be 3-8 characters per the 'u' extension syntax.
	if len(value) < 3 || len(value) > 8 {
		return t, errInvalidArguments
	}

	var (
		buf    [maxCoreSize + maxSimpleUExtensionSize]byte
		uStart int // start of the -u extension.
	)

	// Generate the tag string if needed.
	if t.str == "" {
		uStart = t.genCoreBytes(buf[:])
		buf[uStart] = '-'
		uStart++
	}

	// Create new key-type pair and parse it to verify.
	b := buf[uStart:]
	copy(b, "u-")
	copy(b[2:], key)
	b[4] = '-'
	b = b[:5+copy(b[5:], value)]
	scan := makeScanner(b)
	if parseExtensions(&scan); scan.err != nil {
		return t, scan.err
	}

	// Assemble the replacement string.
	if t.str == "" {
		// The tag had no suffix at all: the whole buffer becomes the string.
		t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1)
		t.str = string(buf[:uStart+len(b)])
	} else {
		s := t.str
		start, end, hasExt := t.findTypeForKey(key)
		if start == end {
			// Key not present: splice in the new pair (dropping the "u-"
			// prefix if a 'u' extension already exists).
			if hasExt {
				b = b[2:]
			}
			t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:])
		} else {
			// Key present: overwrite just the type value.
			t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:])
		}
	}
	return t, nil
}
|
||||
|
||||
// findTypeForKey returns the start and end position for the type corresponding
// to key or the point at which to insert the key-value pair if the type
// wasn't found. The hasExt return value reports whether an -u extension was present.
// Note: the extensions are typically very small and are likely to contain
// only one key-type pair.
func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) {
	p := int(t.pExt)
	// No extensions at all, or an invalid key: nothing to find.
	if len(key) != 2 || p == len(t.str) || p == 0 {
		return p, p, false
	}
	s := t.str

	// Find the correct extension. Extensions are sorted by their
	// single-character type, so we can stop once we pass 'u'.
	for p++; s[p] != 'u'; p++ {
		if s[p] > 'u' {
			p--
			return p, p, false
		}
		if p = nextExtension(s, p); p == len(s) {
			return len(s), len(s), false
		}
	}
	// Proceed to the hyphen following the extension name.
	p++

	// curKey is the key currently being processed.
	curKey := ""

	// Iterate over keys until we get the end of a section.
	for {
		// p points to the hyphen preceding the current token.
		// A two-character token followed by '-' is a key.
		if p3 := p + 3; s[p3] == '-' {
			// Found a key.
			// Check whether we just processed the key that was requested.
			if curKey == key {
				return start, p, true
			}
			// Set to the next key and continue scanning type tokens.
			curKey = s[p+1 : p3]
			// Keys are sorted; once past the requested key, p is the
			// insertion point.
			if curKey > key {
				return p, p, true
			}
			// Start of the type token sequence.
			start = p + 4
			// A type is at least 3 characters long.
			p += 7 // 4 + 3
		} else {
			// Attribute or type, which is at least 3 characters long.
			p += 4
		}
		// p points past the third character of a type or attribute.
		max := p + 5 // maximum length of token plus hyphen.
		if len(s) < max {
			max = len(s)
		}
		// Advance to the end of the current token.
		for ; p < max && s[p] != '-'; p++ {
		}
		// Bail if we have exhausted all tokens or if the next token starts
		// a new extension.
		if p == len(s) || s[p+2] == '-' {
			if curKey == key {
				return start, p, true
			}
			return p, p, true
		}
	}
}
|
||||
|
||||
// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags
// for which data exists in the text repository. The index will change over time
// and should not be stored in persistent storage. Extensions, except for the
// 'va' type of the 'u' extension, are ignored. It will return 0, false if no
// compact tag exists, where 0 is the index for the root language (Und).
func CompactIndex(t Tag) (index int, ok bool) {
	// TODO: perhaps give more frequent tags a lower index.
	// TODO: we could make the indexes stable. This will excluded some
	// possibilities for optimization, so don't do this quite yet.
	b, s, r := t.Raw()
	if len(t.str) > 0 {
		if strings.HasPrefix(t.str, "x-") {
			// We have no entries for user-defined tags.
			return 0, false
		}
		// Unequal offsets mean the tag carries variants.
		if uint16(t.pVariant) != t.pExt {
			// There are no tags with variants and an u-va type.
			if t.TypeForKey("va") != "" {
				return 0, false
			}
			// Keep only the core tag plus its variants.
			t, _ = Raw.Compose(b, s, r, t.Variants())
		} else if _, ok := t.Extension('u'); ok {
			// Strip all but the 'va' entry.
			variant := t.TypeForKey("va")
			t, _ = Raw.Compose(b, s, r)
			t, _ = t.SetTypeForKey("va", variant)
		}
		if len(t.str) > 0 {
			// We have some variants.
			for i, s := range specialTags {
				if s == t {
					return i + 1, true
				}
			}
			return 0, false
		}
	}
	// No variants specified: just compare core components.
	// The key has the form lllssrrr, where l, s, and r are nibbles for
	// respectively the langID, scriptID, and regionID.
	key := uint32(b.langID) << (8 + 12)
	key |= uint32(s.scriptID) << 12
	key |= uint32(r.regionID)
	x, ok := coreTags[key]
	return int(x), ok
}
|
||||
|
||||
// Base is an ISO 639 language code, used for encoding the base language
// of a language tag.
type Base struct {
	langID
}
|
||||
|
||||
// ParseBase parses a 2- or 3-letter ISO 639 code.
// It returns a ValueError if s is a well-formed but unknown language identifier
// or another error if another error occurred.
func ParseBase(s string) (Base, error) {
	if n := len(s); n < 2 || 3 < n {
		return Base{}, errSyntax
	}
	// Copy into a fixed-size buffer for the lookup.
	var buf [3]byte
	l, err := getLangID(buf[:copy(buf[:], s)])
	return Base{l}, err
}
|
||||
|
||||
// Script is a 4-letter ISO 15924 code for representing scripts.
// It is idiomatically represented in title case.
type Script struct {
	scriptID
}
|
||||
|
||||
// ParseScript parses a 4-letter ISO 15924 code.
// It returns a ValueError if s is a well-formed but unknown script identifier
// or another error if another error occurred.
func ParseScript(s string) (Script, error) {
	if len(s) != 4 {
		return Script{}, errSyntax
	}
	// Copy into a fixed-size buffer for the lookup.
	var buf [4]byte
	sc, err := getScriptID(script, buf[:copy(buf[:], s)])
	return Script{sc}, err
}
|
||||
|
||||
// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions.
type Region struct {
	regionID
}
|
||||
|
||||
// EncodeM49 returns the Region for the given UN M.49 code.
// It returns an error if r is not a valid code.
func EncodeM49(r int) (Region, error) {
	rid, err := getRegionM49(r)
	return Region{rid}, err
}
|
||||
|
||||
// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
// It returns a ValueError if s is a well-formed but unknown region identifier
// or another error if another error occurred.
func ParseRegion(s string) (Region, error) {
	if n := len(s); n < 2 || 3 < n {
		return Region{}, errSyntax
	}
	// Copy into a fixed-size buffer for the lookup.
	var buf [3]byte
	r, err := getRegionID(buf[:copy(buf[:], s)])
	return Region{r}, err
}
|
||||
|
||||
// IsCountry returns whether this region is a country or autonomous area. This
|
||||
// includes non-standard definitions from CLDR.
|
||||
func (r Region) IsCountry() bool {
|
||||
if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsGroup returns whether this region defines a collection of regions. This
|
||||
// includes non-standard definitions from CLDR.
|
||||
func (r Region) IsGroup() bool {
|
||||
if r.regionID == 0 {
|
||||
return false
|
||||
}
|
||||
return int(regionInclusion[r.regionID]) < len(regionContainment)
|
||||
}
|
||||
|
||||
// Contains returns whether Region c is contained by Region r. It returns true
// if c == r.
func (r Region) Contains(c Region) bool {
	return r.regionID.contains(c.regionID)
}
|
||||
|
||||
// contains reports whether region c lies within region r, using the
// precomputed containment bit sets. A region always contains itself.
func (r regionID) contains(c regionID) bool {
	if r == c {
		return true
	}
	// Only groups can contain other regions.
	g := regionInclusion[r]
	if g >= nRegionGroups {
		return false
	}
	// m is the set of groups equal to or contained in r.
	m := regionContainment[g]

	// b is the set of groups that contain c.
	d := regionInclusion[c]
	b := regionInclusionBits[d]

	// A contained country may belong to multiple disjoint groups. Matching any
	// of these indicates containment. If the contained region is a group, it
	// must strictly be a subset.
	if d >= nRegionGroups {
		return b&m != 0
	}
	return b&^m == 0
}
|
||||
|
||||
// errNoTLD is returned by TLD for regions without a country code TLD.
var errNoTLD = errors.New("language: region is not a valid ccTLD")

// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
// In all other cases it returns either the region itself or an error.
//
// This method may return an error for a region for which there exists a
// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
// region will already be canonicalized if it was obtained from a Tag that was
// obtained using any of the default methods.
func (r Region) TLD() (Region, error) {
	// See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the
	// difference between ISO 3166-1 and IANA ccTLD.
	if r.regionID == _GB {
		r = Region{_UK}
	}
	if (r.typ() & ccTLD) == 0 {
		return Region{}, errNoTLD
	}
	return r, nil
}
|
||||
|
||||
// Canonicalize returns the region or a possible replacement if the region is
|
||||
// deprecated. It will not return a replacement for deprecated regions that
|
||||
// are split into multiple regions.
|
||||
func (r Region) Canonicalize() Region {
|
||||
if cr := normRegion(r.regionID); cr != 0 {
|
||||
return Region{cr}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Variant represents a registered variant of a language as defined by BCP 47.
type Variant struct {
	// variant is the lowercase registered variant subtag, e.g. "valencia".
	variant string
}
|
||||
|
||||
// ParseVariant parses and returns a Variant. An error is returned if s is not
// a valid variant.
func ParseVariant(s string) (Variant, error) {
	// Variants are stored lowercase in the registry index.
	s = strings.ToLower(s)
	if _, ok := variantIndex[s]; ok {
		return Variant{s}, nil
	}
	return Variant{}, mkErrInvalid([]byte(s))
}
|
||||
|
||||
// String returns the string representation of the variant.
func (v Variant) String() string {
	return v.variant
}
|
911
vendor/golang.org/x/text/language/language_test.go
generated
vendored
Normal file
911
vendor/golang.org/x/text/language/language_test.go
generated
vendored
Normal file
@ -0,0 +1,911 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/text/internal/testtext"
|
||||
)
|
||||
|
||||
// TestTagSize guards against accidental growth of the Tag struct beyond
// 24 bytes.
func TestTagSize(t *testing.T) {
	id := Tag{}
	typ := reflect.TypeOf(id)
	if typ.Size() > 24 {
		t.Errorf("size of Tag was %d; want 24", typ.Size())
	}
}
|
||||
|
||||
// TestIsRoot verifies that only the fully unspecified tag ("und" with no
// script, region, or extensions) reports itself as root.
func TestIsRoot(t *testing.T) {
	loc := Tag{}
	if !loc.IsRoot() {
		t.Errorf("unspecified should be root.")
	}
	for i, tt := range parseTests() {
		loc, _ := Parse(tt.in)
		undef := tt.lang == "und" && tt.script == "" && tt.region == "" && tt.ext == ""
		if loc.IsRoot() != undef {
			t.Errorf("%d: was %v; want %v", i, loc.IsRoot(), undef)
		}
	}
}
|
||||
|
||||
func TestEquality(t *testing.T) {
|
||||
for i, tt := range parseTests()[48:49] {
|
||||
s := tt.in
|
||||
tag := Make(s)
|
||||
t1 := Make(tag.String())
|
||||
if tag != t1 {
|
||||
t.Errorf("%d:%s: equality test 1 failed\n got: %#v\nwant: %#v)", i, s, t1, tag)
|
||||
}
|
||||
t2, _ := Compose(tag)
|
||||
if tag != t2 {
|
||||
t.Errorf("%d:%s: equality test 2 failed\n got: %#v\nwant: %#v", i, s, t2, tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMakeString checks that remakeString rebuilds the canonical tag string
// after tag fields are overwritten, and that doing so stays within one
// allocation.
func TestMakeString(t *testing.T) {
	tests := []struct{ in, out string }{
		{"und", "und"},
		{"und", "und-CW"},
		{"nl", "nl-NL"},
		{"de-1901", "nl-1901"},
		{"de-1901", "de-Arab-1901"},
		{"x-a-b", "de-Arab-x-a-b"},
		{"x-a-b", "x-a-b"},
	}
	for i, tt := range tests {
		id, _ := Parse(tt.in)
		mod, _ := Parse(tt.out)
		id.setTagsFrom(mod)
		// Run twice to confirm remakeString is idempotent.
		for j := 0; j < 2; j++ {
			id.remakeString()
			if str := id.String(); str != tt.out {
				t.Errorf("%d:%d: found %s; want %s", i, j, id.String(), tt.out)
			}
		}
		// The bytes to string conversion as used in remakeString
		// occasionally measures as more than one alloc, breaking this test.
		// To alleviate this we set the number of runs to more than 1.
		if n := testtext.AllocsPerRun(8, id.remakeString); n > 1 {
			t.Errorf("%d: # allocs got %.1f; want <= 1", i, n)
		}
	}
}
|
||||
|
||||
// TestCompactIndex checks CompactIndex lookups for a sample of tags,
// including ones with variants and 'u' extensions.
func TestCompactIndex(t *testing.T) {
	tests := []struct {
		tag   string
		index int
		ok    bool
	}{
		// TODO: these values will change with each CLDR update. This issue
		// will be solved if we decide to fix the indexes.
		{"und", 0, true},
		{"ca-ES-valencia", 1, true},
		{"ca-ES-valencia-u-va-posix", 0, false},
		{"ca-ES-valencia-u-co-phonebk", 1, true},
		{"ca-ES-valencia-u-co-phonebk-va-posix", 0, false},
		{"x-klingon", 0, false},
		{"en-US", 232, true},
		{"en-US-u-va-posix", 2, true},
		{"en", 136, true},
		{"en-u-co-phonebk", 136, true},
		{"en-001", 137, true},
		{"sh", 0, false}, // We don't normalize.
	}
	for _, tt := range tests {
		x, ok := CompactIndex(Raw.MustParse(tt.tag))
		if x != tt.index || ok != tt.ok {
			t.Errorf("%s: got %d, %v; want %d %v", tt.tag, x, ok, tt.index, tt.ok)
		}
	}
}
|
||||
|
||||
// TestMarshal round-trips tag strings through UnmarshalText and MarshalText.
func TestMarshal(t *testing.T) {
	testCases := []string{
		// TODO: these values will change with each CLDR update. This issue
		// will be solved if we decide to fix the indexes.
		"und",
		"ca-ES-valencia",
		"ca-ES-valencia-u-va-posix",
		"ca-ES-valencia-u-co-phonebk",
		"ca-ES-valencia-u-co-phonebk-va-posix",
		"x-klingon",
		"en-US",
		"en-US-u-va-posix",
		"en",
		"en-u-co-phonebk",
		"en-001",
		"sh",
	}
	for _, tc := range testCases {
		var tag Tag
		err := tag.UnmarshalText([]byte(tc))
		if err != nil {
			t.Errorf("UnmarshalText(%q): unexpected error: %v", tc, err)
		}
		b, err := tag.MarshalText()
		if err != nil {
			t.Errorf("MarshalText(%q): unexpected error: %v", tc, err)
		}
		if got := string(b); got != tc {
			t.Errorf("%s: got %q; want %q", tc, got, tc)
		}
	}
}
|
||||
|
||||
// TestBase checks the inferred base language and its confidence for a
// sample of tags.
func TestBase(t *testing.T) {
	tests := []struct {
		loc, lang string
		conf      Confidence
	}{
		{"und", "en", Low},
		{"x-abc", "und", No},
		{"en", "en", Exact},
		{"und-Cyrl", "ru", High},
		// If a region is not included, the official language should be English.
		{"und-US", "en", High},
		// TODO: not-explicitly listed scripts should probably be und, No
		// Modify addTags to return info on how the match was derived.
		// {"und-Aghb", "und", No},
	}
	for i, tt := range tests {
		loc, _ := Parse(tt.loc)
		lang, conf := loc.Base()
		if lang.String() != tt.lang {
			t.Errorf("%d: language was %s; want %s", i, lang, tt.lang)
		}
		if conf != tt.conf {
			t.Errorf("%d: confidence was %d; want %d", i, conf, tt.conf)
		}
	}
}
|
||||
|
||||
// TestParseBase checks ParseBase for valid, bibliographic, unknown, and
// malformed language codes, and that results agree with Raw.Make.
func TestParseBase(t *testing.T) {
	tests := []struct {
		in  string
		out string
		ok  bool
	}{
		{"en", "en", true},
		{"EN", "en", true},
		{"nld", "nl", true},
		{"dut", "dut", true}, // bibliographic
		{"aaj", "und", false}, // unknown
		{"qaa", "qaa", true},
		{"a", "und", false},
		{"", "und", false},
		{"aaaa", "und", false},
	}
	for i, tt := range tests {
		x, err := ParseBase(tt.in)
		if x.String() != tt.out || err == nil != tt.ok {
			t.Errorf("%d:%s: was %s, %v; want %s, %v", i, tt.in, x, err == nil, tt.out, tt.ok)
		}
		if y, _, _ := Raw.Make(tt.out).Raw(); x != y {
			t.Errorf("%d:%s: tag was %s; want %s", i, tt.in, x, y)
		}
	}
}
|
||||
|
||||
// TestScript checks the inferred script and its confidence for a sample
// of tags.
func TestScript(t *testing.T) {
	tests := []struct {
		loc, scr string
		conf     Confidence
	}{
		{"und", "Latn", Low},
		{"en-Latn", "Latn", Exact},
		{"en", "Latn", High},
		{"sr", "Cyrl", Low},
		{"kk", "Cyrl", High},
		{"kk-CN", "Arab", Low},
		{"cmn", "Hans", Low},
		{"ru", "Cyrl", High},
		{"ru-RU", "Cyrl", High},
		{"yue", "Hant", Low},
		{"x-abc", "Zzzz", Low},
		{"und-zyyy", "Zyyy", Exact},
	}
	for i, tt := range tests {
		loc, _ := Parse(tt.loc)
		sc, conf := loc.Script()
		if sc.String() != tt.scr {
			t.Errorf("%d:%s: script was %s; want %s", i, tt.loc, sc, tt.scr)
		}
		if conf != tt.conf {
			t.Errorf("%d:%s: confidence was %d; want %d", i, tt.loc, conf, tt.conf)
		}
	}
}
|
||||
|
||||
// TestParseScript checks ParseScript for valid, unknown, and malformed
// script codes, and that results agree with Raw.Make.
func TestParseScript(t *testing.T) {
	tests := []struct {
		in  string
		out string
		ok  bool
	}{
		{"Latn", "Latn", true},
		{"zzzz", "Zzzz", true},
		{"zyyy", "Zyyy", true},
		{"Latm", "Zzzz", false},
		{"Zzz", "Zzzz", false},
		{"", "Zzzz", false},
		{"Zzzxx", "Zzzz", false},
	}
	for i, tt := range tests {
		x, err := ParseScript(tt.in)
		if x.String() != tt.out || err == nil != tt.ok {
			t.Errorf("%d:%s: was %s, %v; want %s, %v", i, tt.in, x, err == nil, tt.out, tt.ok)
		}
		if err == nil {
			if _, y, _ := Raw.Make("und-" + tt.out).Raw(); x != y {
				t.Errorf("%d:%s: tag was %s; want %s", i, tt.in, x, y)
			}
		}
	}
}
|
||||
|
||||
// TestRegion checks the inferred region and its confidence for a sample
// of tags.
func TestRegion(t *testing.T) {
	tests := []struct {
		loc, reg string
		conf     Confidence
	}{
		{"und", "US", Low},
		{"en", "US", Low},
		{"zh-Hant", "TW", Low},
		{"en-US", "US", Exact},
		{"cmn", "CN", Low},
		{"ru", "RU", Low},
		{"yue", "HK", Low},
		{"x-abc", "ZZ", Low},
	}
	for i, tt := range tests {
		loc, _ := Raw.Parse(tt.loc)
		reg, conf := loc.Region()
		if reg.String() != tt.reg {
			t.Errorf("%d:%s: region was %s; want %s", i, tt.loc, reg, tt.reg)
		}
		if conf != tt.conf {
			t.Errorf("%d:%s: confidence was %d; want %d", i, tt.loc, conf, tt.conf)
		}
	}
}
|
||||
|
||||
// TestEncodeM49 checks UN M.49 encoding for a sample of codes and verifies
// that every successfully encoded code maps to a defined region.
func TestEncodeM49(t *testing.T) {
	tests := []struct {
		m49  int
		code string
		ok   bool
	}{
		{1, "001", true},
		{840, "US", true},
		{899, "ZZ", false},
	}
	for i, tt := range tests {
		if r, err := EncodeM49(tt.m49); r.String() != tt.code || err == nil != tt.ok {
			t.Errorf("%d:%d: was %s, %v; want %s, %v", i, tt.m49, r, err == nil, tt.code, tt.ok)
		}
	}
	// Exhaustively check that success implies a defined region.
	for i := 1; i <= 1000; i++ {
		if r, err := EncodeM49(i); err == nil && r.M49() == 0 {
			t.Errorf("%d has no error, but maps to undefined region", i)
		}
	}
}
|
||||
|
||||
// TestParseRegion checks ParseRegion for M.49 numeric, 2- and 3-letter,
// unknown, and malformed codes, and that results agree with Raw.Make.
func TestParseRegion(t *testing.T) {
	tests := []struct {
		in  string
		out string
		ok  bool
	}{
		{"001", "001", true},
		{"840", "US", true},
		{"899", "ZZ", false},
		{"USA", "US", true},
		{"US", "US", true},
		{"BC", "ZZ", false},
		{"C", "ZZ", false},
		{"CCCC", "ZZ", false},
		{"01", "ZZ", false},
	}
	for i, tt := range tests {
		r, err := ParseRegion(tt.in)
		if r.String() != tt.out || err == nil != tt.ok {
			t.Errorf("%d:%s: was %s, %v; want %s, %v", i, tt.in, r, err == nil, tt.out, tt.ok)
		}
		if err == nil {
			if _, _, y := Raw.Make("und-" + tt.out).Raw(); r != y {
				t.Errorf("%d:%s: tag was %s; want %s", i, tt.in, r, y)
			}
		}
	}
}
|
||||
|
||||
// TestIsCountry checks IsCountry for countries, groups, private-use codes,
// and the XK (Kosovo) exception.
func TestIsCountry(t *testing.T) {
	tests := []struct {
		reg     string
		country bool
	}{
		{"US", true},
		{"001", false},
		{"958", false},
		{"419", false},
		{"203", true},
		{"020", true},
		{"900", false},
		{"999", false},
		{"QO", false},
		{"EU", false},
		{"AA", false},
		{"XK", true},
	}
	for i, tt := range tests {
		reg, _ := getRegionID([]byte(tt.reg))
		r := Region{reg}
		if r.IsCountry() != tt.country {
			t.Errorf("%d: IsCountry(%s) was %v; want %v", i, tt.reg, r.IsCountry(), tt.country)
		}
	}
}
|
||||
|
||||
// TestIsGroup checks IsGroup for region collections versus individual
// countries and private-use codes.
func TestIsGroup(t *testing.T) {
	tests := []struct {
		reg   string
		group bool
	}{
		{"US", false},
		{"001", true},
		{"958", false},
		{"419", true},
		{"203", false},
		{"020", false},
		{"900", false},
		{"999", false},
		{"QO", true},
		{"EU", true},
		{"AA", false},
		{"XK", false},
	}
	for i, tt := range tests {
		reg, _ := getRegionID([]byte(tt.reg))
		r := Region{reg}
		if r.IsGroup() != tt.group {
			t.Errorf("%d: IsGroup(%s) was %v; want %v", i, tt.reg, r.IsGroup(), tt.group)
		}
	}
}
|
||||
|
||||
// TestContains checks region containment: reflexive, direct, indirect,
// and negative cases.
func TestContains(t *testing.T) {
	tests := []struct {
		enclosing, contained string
		contains             bool
	}{
		// A region contains itself.
		{"US", "US", true},
		{"001", "001", true},

		// Direct containment.
		{"001", "002", true},
		{"039", "XK", true},
		{"150", "XK", true},
		{"EU", "AT", true},
		{"QO", "AQ", true},

		// Indirect containment.
		{"001", "US", true},
		{"001", "419", true},
		{"001", "013", true},

		// No containment.
		{"US", "001", false},
		{"155", "EU", false},
	}
	for i, tt := range tests {
		enc, _ := getRegionID([]byte(tt.enclosing))
		con, _ := getRegionID([]byte(tt.contained))
		r := Region{enc}
		if got := r.Contains(Region{con}); got != tt.contains {
			t.Errorf("%d: %s.Contains(%s) was %v; want %v", i, tt.enclosing, tt.contained, got, tt.contains)
		}
	}
}
|
||||
|
||||
// TestRegionCanonicalize checks deprecated-region replacement, including a
// case (SU) with no registered replacement.
func TestRegionCanonicalize(t *testing.T) {
	for i, tt := range []struct{ in, out string }{
		{"UK", "GB"},
		{"TP", "TL"},
		{"QU", "EU"},
		{"SU", "SU"},
		{"VD", "VN"},
		{"DD", "DE"},
	} {
		r := MustParseRegion(tt.in)
		want := MustParseRegion(tt.out)
		if got := r.Canonicalize(); got != want {
			t.Errorf("%d: got %v; want %v", i, got, want)
		}
	}
}
|
||||
|
||||
// TestRegionTLD exercises Region.TLD across the various ISO 3166-1 / IANA
// ccTLD status categories; "ZZ" with ok=false marks codes without a ccTLD.
func TestRegionTLD(t *testing.T) {
	for _, tt := range []struct {
		in, out string
		ok      bool
	}{
		{"EH", "EH", true},
		{"FR", "FR", true},
		{"TL", "TL", true},

		// In ccTLD before in ISO.
		{"GG", "GG", true},

		// Non-standard assignment of ccTLD to ISO code.
		{"GB", "UK", true},

		// Exceptionally reserved in ISO and valid ccTLD.
		{"UK", "UK", true},
		{"AC", "AC", true},
		{"EU", "EU", true},
		{"SU", "SU", true},

		// Exceptionally reserved in ISO and invalid ccTLD.
		{"CP", "ZZ", false},
		{"DG", "ZZ", false},
		{"EA", "ZZ", false},
		{"FX", "ZZ", false},
		{"IC", "ZZ", false},
		{"TA", "ZZ", false},

		// Transitionally reserved in ISO (e.g. deprecated) but valid ccTLD as
		// it is still being phased out.
		{"AN", "AN", true},
		{"TP", "TP", true},

		// Transitionally reserved in ISO (e.g. deprecated) and invalid ccTLD.
		// Defined in package language as it has a mapping in CLDR.
		{"BU", "ZZ", false},
		{"CS", "ZZ", false},
		{"NT", "ZZ", false},
		{"YU", "ZZ", false},
		{"ZR", "ZZ", false},
		// Not defined in package: SF.

		// Indeterminately reserved in ISO.
		// Defined in package language as it has a legacy mapping in CLDR.
		{"DY", "ZZ", false},
		{"RH", "ZZ", false},
		{"VD", "ZZ", false},
		// Not defined in package: EW, FL, JA, LF, PI, RA, RB, RC, RI, RL, RM,
		// RN, RP, WG, WL, WV, and YV.

		// Not assigned in ISO, but legacy definitions in CLDR.
		{"DD", "ZZ", false},
		{"YD", "ZZ", false},

		// Normal mappings but somewhat special status in ccTLD.
		{"BL", "BL", true},
		{"MF", "MF", true},
		{"BV", "BV", true},
		{"SJ", "SJ", true},

		// Have values when normalized, but not as is.
		{"QU", "ZZ", false},

		// ISO Private Use.
		{"AA", "ZZ", false},
		{"QM", "ZZ", false},
		{"QO", "ZZ", false},
		{"XA", "ZZ", false},
		{"XK", "ZZ", false}, // Sometimes used for Kosovo, but invalid ccTLD.
	} {
		if tt.in == "" {
			continue
		}

		r := MustParseRegion(tt.in)
		// The zero Region is the expected result for codes without a ccTLD.
		var want Region
		if tt.out != "ZZ" {
			want = MustParseRegion(tt.out)
		}
		tld, err := r.TLD()
		if got := err == nil; got != tt.ok {
			t.Errorf("error(%v): got %v; want %v", r, got, tt.ok)
		}
		if tld != want {
			t.Errorf("TLD(%v): got %v; want %v", r, tld, want)
		}
	}
}
|
||||
|
||||
// TestCanonicalize verifies tag canonicalization for each CanonType
// (SuppressScript, Legacy, Macro, Deprecated*, CLDR variants), and that
// canonicalization with All is idempotent over all supported base languages.
func TestCanonicalize(t *testing.T) {
	// TODO: do a full test using CLDR data in a separate regression test.
	tests := []struct {
		in, out string
		option  CanonType
	}{
		{"en-Latn", "en", SuppressScript},
		{"sr-Cyrl", "sr-Cyrl", SuppressScript},
		{"sh", "sr-Latn", Legacy},
		{"sh-HR", "sr-Latn-HR", Legacy},
		{"sh-Cyrl-HR", "sr-Cyrl-HR", Legacy},
		{"tl", "fil", Legacy},
		{"no", "no", Legacy},
		{"no", "nb", Legacy | CLDR},
		{"cmn", "cmn", Legacy},
		{"cmn", "zh", Macro},
		{"cmn-u-co-stroke", "zh-u-co-stroke", Macro},
		{"yue", "yue", Macro},
		{"nb", "no", Macro},
		{"nb", "nb", Macro | CLDR},
		{"no", "no", Macro},
		{"no", "no", Macro | CLDR},
		{"iw", "he", DeprecatedBase},
		{"iw", "he", Deprecated | CLDR},
		{"mo", "ro-MD", Deprecated}, // Adopted by CLDR as of version 25.
		{"alb", "sq", Legacy},       // bibliographic
		{"dut", "nl", Legacy},       // bibliographic
		// As of CLDR 25, mo is no longer considered a legacy mapping.
		{"mo", "mo", Legacy | CLDR},
		{"und-AN", "und-AN", Deprecated},
		{"und-YD", "und-YE", DeprecatedRegion},
		{"und-YD", "und-YD", DeprecatedBase},
		{"und-Qaai", "und-Zinh", DeprecatedScript},
		{"und-Qaai", "und-Qaai", DeprecatedBase},
		{"drh", "mn", All}, // drh -> khk -> mn
	}
	for i, tt := range tests {
		in, _ := Raw.Parse(tt.in)
		in, _ = tt.option.Canonicalize(in)
		if in.String() != tt.out {
			t.Errorf("%d:%s: was %s; want %s", i, tt.in, in.String(), tt.out)
		}
		// Internal invariant: the variant offset never exceeds the extension
		// offset, which never exceeds the tag string length.
		if int(in.pVariant) > int(in.pExt) || int(in.pExt) > len(in.str) {
			t.Errorf("%d:%s:offsets %d <= %d <= %d must be true", i, tt.in, in.pVariant, in.pExt, len(in.str))
		}
	}
	// Test idempotence: canonicalizing a canonicalized tag must be a no-op.
	for _, base := range Supported.BaseLanguages() {
		tag, _ := Raw.Compose(base)
		got, _ := All.Canonicalize(tag)
		want, _ := All.Canonicalize(got)
		if got != want {
			t.Errorf("idem(%s): got %s; want %s", tag, got, want)
		}
	}
}
|
||||
|
||||
// TestTypeForKey checks that TypeForKey extracts the value associated with a
// key in a tag's -u extension, and returns "" when the key is absent or the
// tag is private use.
func TestTypeForKey(t *testing.T) {
	tests := []struct{ key, in, out string }{
		{"co", "en", ""},
		{"co", "en-u-abc", ""},
		{"co", "en-u-co-phonebk", "phonebk"},
		{"co", "en-u-co-phonebk-cu-aud", "phonebk"},
		{"co", "x-foo-u-co-phonebk", ""}, // private use: no key lookup
		{"nu", "en-u-co-phonebk-nu-arabic", "arabic"},
		{"kc", "cmn-u-co-stroke", ""},
	}
	for _, tt := range tests {
		if v := Make(tt.in).TypeForKey(tt.key); v != tt.out {
			t.Errorf("%q[%q]: was %q; want %q", tt.in, tt.key, v, tt.out)
		}
	}
}
|
||||
|
||||
// TestSetTypeForKey checks SetTypeForKey's behavior for replacing, adding,
// and removing key/value pairs in the -u extension, including error cases
// for ill-formed keys/values and private-use tags.
func TestSetTypeForKey(t *testing.T) {
	tests := []struct {
		key, value, in, out string
		err                 bool
	}{
		// replace existing value
		{"co", "pinyin", "en-u-co-phonebk", "en-u-co-pinyin", false},
		{"co", "pinyin", "en-u-co-phonebk-cu-xau", "en-u-co-pinyin-cu-xau", false},
		{"co", "pinyin", "en-u-co-phonebk-v-xx", "en-u-co-pinyin-v-xx", false},
		{"co", "pinyin", "en-u-co-phonebk-x-x", "en-u-co-pinyin-x-x", false},
		{"nu", "arabic", "en-u-co-phonebk-nu-vaai", "en-u-co-phonebk-nu-arabic", false},
		// add to existing -u extension
		{"co", "pinyin", "en-u-ca-gregory", "en-u-ca-gregory-co-pinyin", false},
		{"co", "pinyin", "en-u-ca-gregory-nu-vaai", "en-u-ca-gregory-co-pinyin-nu-vaai", false},
		{"co", "pinyin", "en-u-ca-gregory-v-va", "en-u-ca-gregory-co-pinyin-v-va", false},
		{"co", "pinyin", "en-u-ca-gregory-x-a", "en-u-ca-gregory-co-pinyin-x-a", false},
		{"ca", "gregory", "en-u-co-pinyin", "en-u-ca-gregory-co-pinyin", false},
		// remove pair
		{"co", "", "en-u-co-phonebk", "en", false},
		{"co", "", "en-u-ca-gregory-co-phonebk", "en-u-ca-gregory", false},
		{"co", "", "en-u-co-phonebk-nu-arabic", "en-u-nu-arabic", false},
		{"co", "", "en", "en", false},
		// add -u extension
		{"co", "pinyin", "en", "en-u-co-pinyin", false},
		{"co", "pinyin", "und", "und-u-co-pinyin", false},
		{"co", "pinyin", "en-a-aaa", "en-a-aaa-u-co-pinyin", false},
		{"co", "pinyin", "en-x-aaa", "en-u-co-pinyin-x-aaa", false},
		{"co", "pinyin", "en-v-aa", "en-u-co-pinyin-v-aa", false},
		{"co", "pinyin", "en-a-aaa-x-x", "en-a-aaa-u-co-pinyin-x-x", false},
		{"co", "pinyin", "en-a-aaa-v-va", "en-a-aaa-u-co-pinyin-v-va", false},
		// error on invalid values
		{"co", "pinyinxxx", "en", "en", true},
		{"co", "piny.n", "en", "en", true},
		{"co", "pinyinxxx", "en-a-aaa", "en-a-aaa", true},
		{"co", "pinyinxxx", "en-u-aaa", "en-u-aaa", true},
		{"co", "pinyinxxx", "en-u-aaa-co-pinyin", "en-u-aaa-co-pinyin", true},
		{"co", "pinyi.", "en-u-aaa-co-pinyin", "en-u-aaa-co-pinyin", true},
		{"col", "pinyin", "en", "en", true},
		{"co", "cu", "en", "en", true},
		// error when setting on a private use tag
		{"co", "phonebook", "x-foo", "x-foo", true},
	}
	for i, tt := range tests {
		tag := Make(tt.in)
		if v, err := tag.SetTypeForKey(tt.key, tt.value); v.String() != tt.out {
			t.Errorf("%d:%q[%q]=%q: was %q; want %q", i, tt.in, tt.key, tt.value, v, tt.out)
		} else if (err != nil) != tt.err {
			t.Errorf("%d:%q[%q]=%q: error was %v; want %v", i, tt.in, tt.key, tt.value, err != nil, tt.err)
		} else if val := v.TypeForKey(tt.key); err == nil && val != tt.value {
			t.Errorf("%d:%q[%q]==%q: was %v; want %v", i, tt.out, tt.key, tt.value, val, tt.value)
		}
		if len(tag.String()) <= 3 {
			// Simulate a tag for which the string has not been set.
			tag.str, tag.pExt, tag.pVariant = "", 0, 0
			if tag, err := tag.SetTypeForKey(tt.key, tt.value); err == nil {
				if val := tag.TypeForKey(tt.key); err == nil && val != tt.value {
					t.Errorf("%d:%q[%q]==%q: was %v; want %v", i, tt.out, tt.key, tt.value, val, tt.value)
				}
			}
		}
	}
}
|
||||
|
||||
// TestFindKeyAndType checks the internal findTypeForKey helper, which either
// finds a key's value inside the -u extension or reports the insertion point
// at which the key would have to be inserted.
func TestFindKeyAndType(t *testing.T) {
	// out is either the matched type in case of a match or the original
	// string up till the insertion point.
	tests := []struct {
		key    string
		hasExt bool
		in, out string
	}{
		// Don't search past a private use extension.
		{"co", false, "en-x-foo-u-co-pinyin", "en"},
		{"co", false, "x-foo-u-co-pinyin", ""},
		{"co", false, "en-s-fff-x-foo", "en-s-fff"},
		// Insertion points in absence of -u extension.
		{"cu", false, "en", ""}, // t.str is ""
		{"cu", false, "en-v-va", "en"},
		{"cu", false, "en-a-va", "en-a-va"},
		{"cu", false, "en-a-va-v-va", "en-a-va"},
		{"cu", false, "en-x-a", "en"},
		// Tags with the -u extension.
		{"co", true, "en-u-co-standard", "standard"},
		{"co", true, "yue-u-co-pinyin", "pinyin"},
		{"co", true, "en-u-co-abc", "abc"},
		{"co", true, "en-u-co-abc-def", "abc-def"},
		{"co", true, "en-u-co-abc-def-x-foo", "abc-def"},
		{"co", true, "en-u-co-standard-nu-arab", "standard"},
		{"co", true, "yue-u-co-pinyin-nu-arab", "pinyin"},
		// Insertion points.
		{"cu", true, "en-u-co-standard", "en-u-co-standard"},
		{"cu", true, "yue-u-co-pinyin-x-foo", "yue-u-co-pinyin"},
		{"cu", true, "en-u-co-abc", "en-u-co-abc"},
		{"cu", true, "en-u-nu-arabic", "en-u"},
		{"cu", true, "en-u-co-abc-def-nu-arabic", "en-u-co-abc-def"},
	}
	for i, tt := range tests {
		start, end, hasExt := Make(tt.in).findTypeForKey(tt.key)
		if start != end {
			// Found: the slice between start and end is the value.
			res := tt.in[start:end]
			if res != tt.out {
				t.Errorf("%d:%s: was %q; want %q", i, tt.in, res, tt.out)
			}
		} else {
			// Not found: start is the insertion point.
			if hasExt != tt.hasExt {
				t.Errorf("%d:%s: hasExt was %v; want %v", i, tt.in, hasExt, tt.hasExt)
				continue
			}
			if tt.in[:start] != tt.out {
				t.Errorf("%d:%s: insertion point was %q; want %q", i, tt.in, tt.in[:start], tt.out)
			}
		}
	}
}
|
||||
|
||||
// TestParent checks Tag.Parent against the CLDR parent-locale rules: variants
// and extensions are stripped first, maximized scripts are skipped, and
// special parents such as en-001 and es-419 are honored.
func TestParent(t *testing.T) {
	tests := []struct{ in, out string }{
		// Strip variants and extensions first
		{"de-u-co-phonebk", "de"},
		{"de-1994", "de"},
		{"de-Latn-1994", "de"}, // remove superfluous script.

		// Ensure the canonical Tag for an entry is in the chain for base-script
		// pairs.
		{"zh-Hans", "zh"},

		// Skip the script if it is the maximized version. CLDR files for the
		// skipped tag are always empty.
		{"zh-Hans-TW", "zh"},
		{"zh-Hans-CN", "zh"},

		// Insert the script if the maximized script is not the same as the
		// maximized script of the base language.
		{"zh-TW", "zh-Hant"},
		{"zh-HK", "zh-Hant"},
		{"zh-Hant-TW", "zh-Hant"},
		{"zh-Hant-HK", "zh-Hant"},

		// Non-default script skips to und.
		// CLDR
		{"az-Cyrl", "und"},
		{"bs-Cyrl", "und"},
		{"en-Dsrt", "und"},
		{"ha-Arab", "und"},
		{"mn-Mong", "und"},
		{"pa-Arab", "und"},
		{"shi-Latn", "und"},
		{"sr-Latn", "und"},
		{"uz-Arab", "und"},
		{"uz-Cyrl", "und"},
		{"vai-Latn", "und"},
		{"zh-Hant", "und"},
		// extra
		{"nl-Cyrl", "und"},

		// World english inherits from en-001.
		{"en-150", "en-001"},
		{"en-AU", "en-001"},
		{"en-BE", "en-001"},
		{"en-GG", "en-001"},
		{"en-GI", "en-001"},
		{"en-HK", "en-001"},
		{"en-IE", "en-001"},
		{"en-IM", "en-001"},
		{"en-IN", "en-001"},
		{"en-JE", "en-001"},
		{"en-MT", "en-001"},
		{"en-NZ", "en-001"},
		{"en-PK", "en-001"},
		{"en-SG", "en-001"},

		// Spanish in Latin-American countries have es-419 as parent.
		{"es-AR", "es-419"},
		{"es-BO", "es-419"},
		{"es-CL", "es-419"},
		{"es-CO", "es-419"},
		{"es-CR", "es-419"},
		{"es-CU", "es-419"},
		{"es-DO", "es-419"},
		{"es-EC", "es-419"},
		{"es-GT", "es-419"},
		{"es-HN", "es-419"},
		{"es-MX", "es-419"},
		{"es-NI", "es-419"},
		{"es-PA", "es-419"},
		{"es-PE", "es-419"},
		{"es-PR", "es-419"},
		{"es-PY", "es-419"},
		{"es-SV", "es-419"},
		{"es-US", "es-419"},
		{"es-UY", "es-419"},
		{"es-VE", "es-419"},
		// exceptions (according to CLDR)
		{"es-CW", "es"},

		// Inherit from pt-PT, instead of pt for these countries.
		{"pt-AO", "pt-PT"},
		{"pt-CV", "pt-PT"},
		{"pt-GW", "pt-PT"},
		{"pt-MO", "pt-PT"},
		{"pt-MZ", "pt-PT"},
		{"pt-ST", "pt-PT"},
		{"pt-TL", "pt-PT"},
	}
	for _, tt := range tests {
		tag := Raw.MustParse(tt.in)
		if p := Raw.MustParse(tt.out); p != tag.Parent() {
			t.Errorf("%s: was %v; want %v", tt.in, tag.Parent(), p)
		}
	}
}
|
||||
|
||||
// Benchmark corpora, grouped by how much work Parse has to do on each tag.
var (
	// Tags without error that don't need to be changed.
	benchBasic = []string{
		"en",
		"en-Latn",
		"en-GB",
		"za",
		"zh-Hant",
		"zh",
		"zh-HK",
		"ar-MK",
		"en-CA",
		"fr-CA",
		"fr-CH",
		"fr",
		"lv",
		"he-IT",
		"tlh",
		"ja",
		"ja-Jpan",
		"ja-Jpan-JP",
		"de-1996",
		"de-CH",
		"sr",
		"sr-Latn",
	}
	// Tags with extensions, not changes required.
	benchExt = []string{
		"x-a-b-c-d",
		"x-aa-bbbb-cccccccc-d",
		"en-x_cc-b-bbb-a-aaa",
		"en-c_cc-b-bbb-a-aaa-x-x",
		"en-u-co-phonebk",
		"en-Cyrl-u-co-phonebk",
		"en-US-u-co-phonebk-cu-xau",
		"en-nedix-u-co-phonebk",
		"en-t-t0-abcd",
		"en-t-nl-latn",
		"en-t-t0-abcd-x-a",
	}
	// Change, but not memory allocation required.
	benchSimpleChange = []string{
		"EN",
		"i-klingon",
		"en-latn",
		"zh-cmn-Hans-CN",
		"iw-NL",
	}
	// Change and memory allocation required.
	benchChangeAlloc = []string{
		"en-c_cc-b-bbb-a-aaa",
		"en-u-cu-xua-co-phonebk",
		"en-u-cu-xua-co-phonebk-a-cd",
		"en-u-def-abc-cu-xua-co-phonebk",
		"en-t-en-Cyrl-NL-1994",
		"en-t-en-Cyrl-NL-1994-t0-abc-def",
	}
	// Tags that result in errors.
	benchErr = []string{
		// IllFormed
		"x_A.-B-C_D",
		"en-u-cu-co-phonebk",
		"en-u-cu-xau-co",
		"en-t-nl-abcd",
		// Invalid
		"xx",
		"nl-Uuuu",
		"nl-QB",
	}
	benchChange = append(benchSimpleChange, benchChangeAlloc...)
	benchAll    = append(append(append(benchBasic, benchExt...), benchChange...), benchErr...)
)
|
||||
|
||||
// doParse is the shared benchmark driver: it parses one tag from the corpus
// per iteration.
func doParse(b *testing.B, tag []string) {
	for i := 0; i < b.N; i++ {
		// Use the modulo instead of looping over all tags so that we get a somewhat
		// meaningful ns/op.
		Parse(tag[i%len(tag)])
	}
}
|
||||
|
||||
// BenchmarkParse measures Parse over the full mixed corpus.
func BenchmarkParse(b *testing.B) {
	doParse(b, benchAll)
}
|
||||
|
||||
// BenchmarkParseBasic measures Parse over already-canonical tags.
func BenchmarkParseBasic(b *testing.B) {
	doParse(b, benchBasic)
}
|
||||
|
||||
// BenchmarkParseError measures Parse over ill-formed and invalid tags.
func BenchmarkParseError(b *testing.B) {
	doParse(b, benchErr)
}
|
||||
|
||||
// BenchmarkParseSimpleChange measures Parse over tags that need
// normalization but no extra allocation.
func BenchmarkParseSimpleChange(b *testing.B) {
	doParse(b, benchSimpleChange)
}
|
||||
|
||||
// BenchmarkParseChangeAlloc measures Parse over tags whose normalization
// requires allocating a new string.
func BenchmarkParseChangeAlloc(b *testing.B) {
	doParse(b, benchChangeAlloc)
}
|
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
Normal file
396
vendor/golang.org/x/text/language/lookup.go
generated
vendored
Normal file
@ -0,0 +1,396 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// findIndex tries to find the given tag in idx and returns a standardized error
// if it could not be found. form is a template of the expected case pattern
// (e.g. "ZZ", "Zzzz"); FixCase normalizes key in place to that pattern and
// reports whether the length matched.
func findIndex(idx tag.Index, key []byte, form string) (index int, err error) {
	if !tag.FixCase(form, key) {
		return 0, errSyntax
	}
	i := idx.Index(key)
	if i == -1 {
		return 0, mkErrInvalid(key)
	}
	return i, nil
}
|
||||
|
||||
// searchUint returns the smallest index i in the ascending slice vals such
// that vals[i] >= key, or len(vals) if no such element exists.
func searchUint(vals []uint16, key uint16) int {
	notLess := func(i int) bool { return vals[i] >= key }
	return sort.Search(len(vals), notLess)
}
|
||||
|
||||
// langID is a compact numeric identifier for a base language, used as an
// index into the package's generated language tables.
type langID uint16
|
||||
|
||||
// getLangID returns the langID of s if s is a canonical subtag
// or langUnknown if s is not a canonical subtag.
// It dispatches on length: 2 bytes are treated as an ISO 639-1 code,
// anything else goes through the 3-letter path (which also rejects
// other lengths as a syntax error).
func getLangID(s []byte) (langID, error) {
	if len(s) == 2 {
		return getLangISO2(s)
	}
	return getLangISO3(s)
}
|
||||
|
||||
// normLang returns the mapped langID of id according to langAliasMap, along
// with the kind of alias applied (deprecated, macro, or legacy). If id has
// no alias entry, it is returned unchanged with langAliasTypeUnknown.
func normLang(id langID) (langID, langAliasType) {
	// langAliasMap is sorted by 'from', so a binary search finds the entry.
	k := sort.Search(len(langAliasMap), func(i int) bool {
		return langAliasMap[i].from >= uint16(id)
	})
	if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) {
		return langID(langAliasMap[k].to), langAliasTypes[k]
	}
	return id, langAliasTypeUnknown
}
|
||||
|
||||
// getLangISO2 returns the langID for the given 2-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO2(s []byte) (langID, error) {
	// "zz" is the case template: two lowercase letters.
	if !tag.FixCase("zz", s) {
		return 0, errSyntax
	}
	// Elem(i)[3] != 0 distinguishes entries that actually have a 2-letter
	// code from 3-letter-only entries stored in the same index.
	if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 {
		return langID(i), nil
	}
	return 0, mkErrInvalid(s)
}
|
||||
|
||||
// base is the radix used to pack lowercase ASCII letters into an integer:
// 'a'..'z' map to digits 0..25.
const base = 'z' - 'a' + 1

// strToInt interprets s, which must consist solely of lowercase ASCII
// letters, as a base-26 number with 'a' as digit zero.
func strToInt(s []byte) uint {
	var n uint
	for _, c := range s {
		n = n*base + uint(c-'a')
	}
	return n
}

// converts the given integer to the original ASCII string passed to strToInt.
// len(s) must match the number of characters obtained.
func intToStr(v uint, s []byte) {
	// Fill from the least-significant digit backwards.
	for i := len(s); i > 0; i-- {
		s[i-1] = byte(v%base) + 'a'
		v /= base
	}
}
|
||||
|
||||
// getLangISO3 returns the langID for the given 3-letter ISO language code
// or unknownLang if this does not exist.
func getLangISO3(s []byte) (langID, error) {
	// "und" is the case template: three lowercase letters.
	if tag.FixCase("und", s) {
		// first try to match canonical 3-letter entries
		for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) {
			if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] {
				// We treat "und" as special and always translate it to "unspecified".
				// Note that ZZ and Zzzz are private use and are not treated as
				// unspecified by default.
				id := langID(i)
				if id == nonCanonicalUnd {
					return 0, nil
				}
				return id, nil
			}
		}
		// Alternative 3-letter codes that map onto canonical entries.
		if i := altLangISO3.Index(s); i != -1 {
			return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil
		}
		// Languages without an index entry: encoded directly as base-26
		// offset by langNoIndexOffset, with a bitmap marking validity.
		n := strToInt(s)
		if langNoIndex[n/8]&(1<<(n%8)) != 0 {
			return langID(n) + langNoIndexOffset, nil
		}
		// Check for non-canonical uses of ISO3.
		for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) {
			if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return langID(i), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}
|
||||
|
||||
// stringToBuf writes the string to b and returns the number of bytes
// written. cap(b) must be >= 3.
func (id langID) stringToBuf(b []byte) int {
	if id >= langNoIndexOffset {
		// IDs past langNoIndexOffset are base-26 packed 3-letter codes.
		intToStr(uint(id)-langNoIndexOffset, b[:3])
		return 3
	} else if id == 0 {
		return copy(b, "und")
	}
	// Each lang table entry is 4 bytes; byte 3 == 0 means 3-letter-only.
	l := lang[id<<2:]
	if l[3] == 0 {
		return copy(b, l[:3])
	}
	return copy(b, l[:2])
}
|
||||
|
||||
// String returns the BCP 47 representation of the langID.
// Use b as variable name, instead of id, to ensure the variable
// used is consistent with that of Base in which this type is embedded.
func (b langID) String() string {
	if b == 0 {
		return "und"
	} else if b >= langNoIndexOffset {
		// Base-26 packed 3-letter code; decode into a stack buffer.
		b -= langNoIndexOffset
		buf := [3]byte{}
		intToStr(uint(b), buf[:])
		return string(buf[:])
	}
	// Table entry: byte 3 == 0 means only a 3-letter form exists.
	l := lang.Elem(int(b))
	if l[3] == 0 {
		return l[:3]
	}
	return l[:2]
}
|
||||
|
||||
// ISO3 returns the ISO 639-3 language code.
func (b langID) ISO3() string {
	if b == 0 || b >= langNoIndexOffset {
		// und, or a code that is already stored as its 3-letter form.
		return b.String()
	}
	l := lang.Elem(int(b))
	if l[3] == 0 {
		return l[:3]
	} else if l[2] == 0 {
		// The 3-letter form lives in the alternative table, indexed by byte 3.
		return altLangISO3.Elem(int(l[3]))[:3]
	}
	// This allocation will only happen for 3-letter ISO codes
	// that are non-canonical BCP 47 language identifiers.
	return l[0:1] + l[2:4]
}
|
||||
|
||||
// IsPrivateUse reports whether this language code is reserved for private use.
func (b langID) IsPrivateUse() bool {
	return langPrivateStart <= b && b <= langPrivateEnd
}
|
||||
|
||||
// regionID is a compact numeric identifier for a region, used as an index
// into the package's generated region tables.
type regionID uint16
|
||||
|
||||
// getRegionID returns the region id for s if s is a valid 2-letter region code
// or unknownRegion.
// 3-byte input is either a 3-letter ISO 3166-1 alpha-3 code (starts with a
// letter) or a numeric UN M.49 code; anything else is tried as alpha-2.
func getRegionID(s []byte) (regionID, error) {
	if len(s) == 3 {
		if isAlpha(s[0]) {
			return getRegionISO3(s)
		}
		if i, err := strconv.ParseUint(string(s), 10, 10); err == nil {
			return getRegionM49(int(i))
		}
	}
	return getRegionISO2(s)
}
|
||||
|
||||
// getRegionISO2 returns the regionID for the given 2-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO2(s []byte) (regionID, error) {
	// "ZZ" is the case template: two uppercase letters.
	i, err := findIndex(regionISO, s, "ZZ")
	if err != nil {
		return 0, err
	}
	// ISO regions are offset past the numeric-only region IDs.
	return regionID(i) + isoRegionOffset, nil
}
|
||||
|
||||
// getRegionISO3 returns the regionID for the given 3-letter ISO country code
// or unknownRegion if this does not exist.
func getRegionISO3(s []byte) (regionID, error) {
	// "ZZZ" is the case template: three uppercase letters.
	if tag.FixCase("ZZZ", s) {
		// Entries store the alpha-2 code in bytes 0-1 and the remaining two
		// letters of the alpha-3 code in bytes 2-3.
		for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) {
			if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] {
				return regionID(i) + isoRegionOffset, nil
			}
		}
		// Fall back to the table of alpha-3 codes with no alpha-2 pattern.
		for i := 0; i < len(altRegionISO3); i += 3 {
			if tag.Compare(altRegionISO3[i:i+3], s) == 0 {
				return regionID(altRegionIDs[i/3]), nil
			}
		}
		return 0, mkErrInvalid(s)
	}
	return 0, errSyntax
}
|
||||
|
||||
// getRegionM49 returns the regionID for the given UN M.49 numeric code n
// (1..999), or a ValueError if n has no mapping.
func getRegionM49(n int) (regionID, error) {
	if 0 < n && n <= 999 {
		const (
			searchBits = 7
			regionBits = 9
			regionMask = 1<<regionBits - 1
		)
		// fromM49 packs (m49 code << regionBits | regionID) into uint16s,
		// bucketed by the top bits of the code via m49Index.
		idx := n >> searchBits
		buf := fromM49[m49Index[idx]:m49Index[idx+1]]
		val := uint16(n) << regionBits // we rely on bits shifting out
		i := sort.Search(len(buf), func(i int) bool {
			return buf[i] >= val
		})
		if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val {
			return regionID(r & regionMask), nil
		}
	}
	var e ValueError
	// NOTE(review): this writes n into a buffer initialized with a *copy* of
	// e.v, so the value never reaches the returned error; looks like an
	// upstream quirk in this vendored version — confirm before changing.
	fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n)
	return 0, e
}
|
||||
|
||||
// normRegion returns a region if r is deprecated or 0 otherwise.
// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ).
// TODO: consider mapping split up regions to new most populous one (like CLDR).
func normRegion(r regionID) regionID {
	// regionOldMap is sorted by 'from', so a binary search finds the entry.
	m := regionOldMap
	k := sort.Search(len(m), func(i int) bool {
		return m[i].from >= uint16(r)
	})
	if k < len(m) && m[k].from == uint16(r) {
		return regionID(m[k].to)
	}
	return 0
}
|
||||
|
||||
// Bit flags describing the status of a region code, stored per region in
// regionTypes.
const (
	iso3166UserAssigned = 1 << iota // ISO 3166 user-assigned code
	ccTLD                           // valid country-code top-level domain
	bcp47Region                     // valid BCP 47 region subtag
)
|
||||
|
||||
// typ returns the status flag bits for r from the regionTypes table.
func (r regionID) typ() byte {
	return regionTypes[r]
}
|
||||
|
||||
// String returns the BCP 47 representation for the region.
// It returns "ZZ" for an unspecified region.
func (r regionID) String() string {
	if r < isoRegionOffset {
		// IDs below the offset are numeric (UN M.49) regions.
		if r == 0 {
			return "ZZ"
		}
		return fmt.Sprintf("%03d", r.M49())
	}
	r -= isoRegionOffset
	return regionISO.Elem(int(r))[:2]
}
|
||||
|
||||
// ISO3 returns the 3-letter ISO code of r.
// Note that not all regions have a 3-letter ISO code.
// In such cases this method returns "ZZZ".
func (r regionID) ISO3() string {
	if r < isoRegionOffset {
		return "ZZZ"
	}
	r -= isoRegionOffset
	reg := regionISO.Elem(int(r))
	switch reg[2] {
	case 0:
		// Byte 2 == 0: the alpha-3 form lives in the alternative table.
		return altRegionISO3[reg[3]:][:3]
	case ' ':
		// Byte 2 == ' ': no alpha-3 code exists for this region.
		return "ZZZ"
	}
	// Otherwise the alpha-3 is the first letter plus bytes 2-3.
	return reg[0:1] + reg[2:4]
}
|
||||
|
||||
// M49 returns the UN M.49 encoding of r, or 0 if this encoding
// is not defined for r.
func (r regionID) M49() int {
	return int(m49[r])
}
|
||||
|
||||
// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This
// may include private-use tags that are assigned by CLDR and used in this
// implementation. So IsPrivateUse and IsCountry can be simultaneously true.
func (r regionID) IsPrivateUse() bool {
	return r.typ()&iso3166UserAssigned != 0
}
|
||||
|
||||
// scriptID is a compact numeric identifier for a script, used as an index
// into the package's generated script table.
type scriptID uint8
|
||||
|
||||
// getScriptID returns the script id for string s. It assumes that s
// is of the format [A-Z][a-z]{3}.
func getScriptID(idx tag.Index, s []byte) (scriptID, error) {
	// "Zzzz" is the case template: title case, four letters.
	i, err := findIndex(idx, s, "Zzzz")
	return scriptID(i), err
}
|
||||
|
||||
// String returns the script code in title case.
// It returns "Zzzz" for an unspecified script.
func (s scriptID) String() string {
	if s == 0 {
		return "Zzzz"
	}
	return script.Elem(int(s))
}
|
||||
|
||||
// IsPrivateUse reports whether this script code is reserved for private use.
func (s scriptID) IsPrivateUse() bool {
	return _Qaaa <= s && s <= _Qabx
}
|
||||
|
||||
const (
	// maxAltTaglen is the length of the longest grandfathered/legacy tag key.
	maxAltTaglen = len("en-US-POSIX")
	maxLen       = maxAltTaglen
)
|
||||
|
||||
var (
	// grandfatheredMap holds a mapping from legacy and grandfathered tags to
	// their base language or index to more elaborate tag.
	// Positive values are langIDs; negative values index into altTags via
	// altTagIndex (see grandfathered below). Keys are lowercase, zero-padded
	// to maxLen bytes.
	grandfatheredMap = map[[maxLen]byte]int16{
		[maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban
		[maxLen]byte{'i', '-', 'a', 'm', 'i'}:                          _ami, // i-ami
		[maxLen]byte{'i', '-', 'b', 'n', 'n'}:                          _bnn, // i-bnn
		[maxLen]byte{'i', '-', 'h', 'a', 'k'}:                          _hak, // i-hak
		[maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}:      _tlh, // i-klingon
		[maxLen]byte{'i', '-', 'l', 'u', 'x'}:                          _lb,  // i-lux
		[maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}:           _nv,  // i-navajo
		[maxLen]byte{'i', '-', 'p', 'w', 'n'}:                          _pwn, // i-pwn
		[maxLen]byte{'i', '-', 't', 'a', 'o'}:                          _tao, // i-tao
		[maxLen]byte{'i', '-', 't', 'a', 'y'}:                          _tay, // i-tay
		[maxLen]byte{'i', '-', 't', 's', 'u'}:                          _tsu, // i-tsu
		[maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}:                     _nb,  // no-bok
		[maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}:                     _nn,  // no-nyn
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}:      _sfb, // sgn-BE-FR
		[maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}:      _vgt, // sgn-BE-NL
		[maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}:      _sgg, // sgn-CH-DE
		[maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}:           _cmn, // zh-guoyu
		[maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}:           _hak, // zh-hakka
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan
		[maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}:           _hsn, // zh-xiang

		// Grandfathered tags with no modern replacement will be converted as
		// follows:
		[maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish
		[maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}:           -2, // en-GB-oed
		[maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}:           -3, // i-default
		[maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}:      -4, // i-enochian
		[maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}:                     -5, // i-mingo
		[maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}:                          -6, // zh-min

		// CLDR-specific tag.
		[maxLen]byte{'r', 'o', 'o', 't'}:                                    0,  // root
		[maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX"
	}

	// altTagIndex delimits the replacement strings inside altTags:
	// entry -v maps to altTags[altTagIndex[v-1]:altTagIndex[v]].
	altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102}

	altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix"
)
|
||||
|
||||
// grandfathered looks up the zero-padded, lowercased tag s in
// grandfatheredMap. Negative map values select a full replacement tag from
// altTags; non-negative values are the replacement base langID.
func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) {
	if v, ok := grandfatheredMap[s]; ok {
		if v < 0 {
			return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true
		}
		t.lang = langID(v)
		return t, true
	}
	return t, false
}
|
457
vendor/golang.org/x/text/language/lookup_test.go
generated
vendored
Normal file
457
vendor/golang.org/x/text/language/lookup_test.go
generated
vendored
Normal file
@ -0,0 +1,457 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// b is shorthand for the []byte form that the lookup helpers take as input.
func b(s string) []byte {
	buf := []byte(s)
	return buf
}
|
||||
|
||||
// TestLangID round-trips language subtags through getLangID and checks the
// ISO2/ISO3 lookups, alias normalization (normLang), and the String/ISO3
// renderings against expected values.
func TestLangID(t *testing.T) {
	tests := []struct {
		id, bcp47, iso3, norm string
		err                   error
	}{
		{id: "", bcp47: "und", iso3: "und", err: errSyntax},
		{id: "  ", bcp47: "und", iso3: "und", err: errSyntax},
		{id: "   ", bcp47: "und", iso3: "und", err: errSyntax},
		{id: "    ", bcp47: "und", iso3: "und", err: errSyntax},
		{id: "xxx", bcp47: "und", iso3: "und", err: mkErrInvalid([]byte("xxx"))},
		{id: "und", bcp47: "und", iso3: "und"},
		{id: "aju", bcp47: "aju", iso3: "aju", norm: "jrb"},
		{id: "jrb", bcp47: "jrb", iso3: "jrb"},
		{id: "es", bcp47: "es", iso3: "spa"},
		{id: "spa", bcp47: "es", iso3: "spa"},
		{id: "ji", bcp47: "ji", iso3: "yid-", norm: "yi"},
		{id: "jw", bcp47: "jw", iso3: "jav-", norm: "jv"},
		{id: "ar", bcp47: "ar", iso3: "ara"},
		{id: "kw", bcp47: "kw", iso3: "cor"},
		{id: "arb", bcp47: "arb", iso3: "arb", norm: "ar"},
		{id: "ar", bcp47: "ar", iso3: "ara"},
		{id: "kur", bcp47: "ku", iso3: "kur"},
		{id: "nl", bcp47: "nl", iso3: "nld"},
		{id: "NL", bcp47: "nl", iso3: "nld"},
		{id: "gsw", bcp47: "gsw", iso3: "gsw"},
		{id: "gSW", bcp47: "gsw", iso3: "gsw"},
		{id: "und", bcp47: "und", iso3: "und"},
		{id: "sh", bcp47: "sh", iso3: "hbs", norm: "sr"},
		{id: "hbs", bcp47: "sh", iso3: "hbs", norm: "sr"},
		{id: "no", bcp47: "no", iso3: "nor", norm: "no"},
		{id: "nor", bcp47: "no", iso3: "nor", norm: "no"},
		{id: "cmn", bcp47: "cmn", iso3: "cmn", norm: "zh"},
	}
	for i, tt := range tests {
		want, err := getLangID(b(tt.id))
		if err != tt.err {
			t.Errorf("%d:err(%s): found %q; want %q", i, tt.id, err, tt.err)
		}
		if err != nil {
			continue
		}
		// ISO2 lookup must agree with the generic lookup for 2-letter codes.
		if id, _ := getLangISO2(b(tt.bcp47)); len(tt.bcp47) == 2 && want != id {
			t.Errorf("%d:getISO2(%s): found %v; want %v", i, tt.bcp47, id, want)
		}
		// An iso3 value longer than 3 bytes (e.g. "yid-") marks entries
		// that should only round-trip via String/ISO3, not via lookup.
		if len(tt.iso3) == 3 {
			if id, _ := getLangISO3(b(tt.iso3)); want != id {
				t.Errorf("%d:getISO3(%s): found %q; want %q", i, tt.iso3, id, want)
			}
			if id, _ := getLangID(b(tt.iso3)); want != id {
				t.Errorf("%d:getID3(%s): found %v; want %v", i, tt.iso3, id, want)
			}
		}
		norm := want
		if tt.norm != "" {
			norm, _ = getLangID(b(tt.norm))
		}
		id, _ := normLang(want)
		if id != norm {
			t.Errorf("%d:norm(%s): found %v; want %v", i, tt.id, id, norm)
		}
		if id := want.String(); tt.bcp47 != id {
			t.Errorf("%d:String(): found %s; want %s", i, id, tt.bcp47)
		}
		if id := want.ISO3(); tt.iso3[:3] != id {
			t.Errorf("%d:iso3(): found %s; want %s", i, id, tt.iso3[:3])
		}
	}
}
|
||||
|
||||
// TestGrandfathered checks that grandfathered and legacy tags are converted
// to their modern replacements by Raw.Make, case-insensitively and with
// underscore separators accepted.
func TestGrandfathered(t *testing.T) {
	for _, tt := range []struct{ in, out string }{
		{"art-lojban", "jbo"},
		{"i-ami", "ami"},
		{"i-bnn", "bnn"},
		{"i-hak", "hak"},
		{"i-klingon", "tlh"},
		{"i-lux", "lb"},
		{"i-navajo", "nv"},
		{"i-pwn", "pwn"},
		{"i-tao", "tao"},
		{"i-tay", "tay"},
		{"i-tsu", "tsu"},
		{"no-bok", "nb"},
		{"no-nyn", "nn"},
		{"sgn-BE-FR", "sfb"},
		{"sgn-BE-NL", "vgt"},
		{"sgn-CH-DE", "sgg"},
		{"sgn-ch-de", "sgg"},
		{"zh-guoyu", "cmn"},
		{"zh-hakka", "hak"},
		{"zh-min-nan", "nan"},
		{"zh-xiang", "hsn"},

		// Grandfathered tags with no modern replacement will be converted as follows:
		{"cel-gaulish", "xtg-x-cel-gaulish"},
		{"en-GB-oed", "en-GB-oxendict"},
		{"en-gb-oed", "en-GB-oxendict"},
		{"i-default", "en-x-i-default"},
		{"i-enochian", "und-x-i-enochian"},
		{"i-mingo", "see-x-i-mingo"},
		{"zh-min", "nan-x-zh-min"},

		{"root", "und"},
		{"en_US_POSIX", "en-US-u-va-posix"},
		{"en_us_posix", "en-US-u-va-posix"},
		{"en-us-posix", "en-US-u-va-posix"},
	} {
		got := Raw.Make(tt.in)
		want := Raw.MustParse(tt.out)
		if got != want {
			t.Errorf("%s: got %q; want %q", tt.in, got, want)
		}
	}
}
|
||||
|
||||
// TestRegionID verifies getRegionID for numeric (UN M.49), two-letter, and
// three-letter region codes. Inputs prefixed with '_' are expected to fail.
// For two-letter inputs the result is cross-checked against getRegionISO2.
func TestRegionID(t *testing.T) {
	tests := []struct {
		in, out string
	}{
		{"_ ", ""},
		{"_000", ""},
		{"419", "419"},
		{"AA", "AA"},
		{"ATF", "TF"}, // alpha-3 input resolves to its two-letter form
		{"HV", "HV"},
		{"CT", "CT"},
		{"DY", "DY"},
		{"IC", "IC"},
		{"FQ", "FQ"},
		{"JT", "JT"},
		{"ZZ", "ZZ"},
		{"EU", "EU"},
		{"QO", "QO"},
		{"FX", "FX"},
	}
	for i, tt := range tests {
		if tt.in[0] == '_' {
			// A leading '_' marks an input that must be rejected.
			id := tt.in[1:]
			if _, err := getRegionID(b(id)); err == nil {
				t.Errorf("%d:err(%s): found nil; want error", i, id)
			}
			continue
		}
		want, _ := getRegionID(b(tt.in))
		if s := want.String(); s != tt.out {
			t.Errorf("%d:%s: found %q; want %q", i, tt.in, s, tt.out)
		}
		if len(tt.in) == 2 {
			want, _ := getRegionISO2(b(tt.in))
			if s := want.String(); s != tt.out {
				t.Errorf("%d:getISO2(%s): found %q; want %q", i, tt.in, s, tt.out)
			}
		}
	}
}
|
||||
|
||||
// TestRegionType checks the flags reported by regionID.typ (BCP 47 membership,
// ccTLD status, ISO 3166 user-assigned status) for a selection of current,
// reserved, deleted, and user-assigned region codes.
func TestRegionType(t *testing.T) {
	for _, tt := range []struct {
		r string
		t byte
	}{
		{"NL", bcp47Region | ccTLD},
		{"EU", bcp47Region | ccTLD}, // exceptionally reserved
		{"AN", bcp47Region | ccTLD}, // transitionally reserved

		{"DD", bcp47Region}, // deleted in ISO, deprecated in BCP 47
		{"NT", bcp47Region}, // transitionally reserved, deprecated in BCP 47

		{"XA", iso3166UserAssigned | bcp47Region},
		{"ZZ", iso3166UserAssigned | bcp47Region},
		{"AA", iso3166UserAssigned | bcp47Region},
		{"QO", iso3166UserAssigned | bcp47Region},
		{"QM", iso3166UserAssigned | bcp47Region},
		{"XK", iso3166UserAssigned | bcp47Region},

		{"CT", 0}, // deleted in ISO, not in BCP 47, canonicalized in CLDR
	} {
		r := MustParseRegion(tt.r)
		if tp := r.typ(); tp != tt.t {
			t.Errorf("Type(%s): got %x; want %x", tt.r, tp, tt.t)
		}
	}
}
|
||||
|
||||
// TestRegionISO3 checks the regionID -> ISO 3166-1 alpha-3 mapping (ISO3) and
// the reverse lookup (getRegionISO3). An iso3 value of "ZZZ" means the region
// has no alpha-3 code of its own; a non-empty "to" gives the region the
// alpha-3 code resolves back to when it differs from "from" (deprecated codes).
func TestRegionISO3(t *testing.T) {
	tests := []struct {
		from, iso3, to string
	}{
		{" ", "ZZZ", "ZZ"},
		{"000", "ZZZ", "ZZ"},
		{"AA", "AAA", ""},
		{"CT", "CTE", ""},
		{"DY", "DHY", ""},
		{"EU", "QUU", ""},
		{"HV", "HVO", ""},
		{"IC", "ZZZ", "ZZ"},
		{"JT", "JTN", ""},
		{"PZ", "PCZ", ""},
		{"QU", "QUU", "EU"},
		{"QO", "QOO", ""},
		{"YD", "YMD", ""},
		{"FQ", "ATF", "TF"},
		{"TF", "ATF", ""},
		{"FX", "FXX", ""},
		{"ZZ", "ZZZ", ""},
		{"419", "ZZZ", "ZZ"},
	}
	for _, tt := range tests {
		r, _ := getRegionID(b(tt.from))
		if s := r.ISO3(); s != tt.iso3 {
			t.Errorf("iso3(%q): found %q; want %q", tt.from, s, tt.iso3)
		}
		if tt.iso3 == "" {
			continue
		}
		// Reverse lookup: the alpha-3 code should resolve to tt.to, or back
		// to tt.from when no alternative is given.
		want := tt.to
		if tt.to == "" {
			want = tt.from
		}
		r, _ = getRegionID(b(want))
		if id, _ := getRegionISO3(b(tt.iso3)); id != r {
			t.Errorf("%s: found %q; want %q", tt.iso3, id, want)
		}
	}
}
|
||||
|
||||
// TestRegionM49 exercises the mapping between UN M.49 numeric codes and
// regions in both directions: getRegionM49 (code -> region) and
// regionID.M49 (region -> code).
func TestRegionM49(t *testing.T) {
	fromTests := []struct {
		m49 int
		id  string
	}{
		// Out-of-range or unassigned codes must yield an error (id == "").
		{0, ""},
		{-1, ""},
		{1000, ""},
		{10000, ""},

		{001, "001"},
		{104, "MM"},
		{180, "CD"},
		{230, "ET"},
		{231, "ET"},
		{249, "FX"},
		{250, "FR"},
		{276, "DE"},
		{278, "DD"},
		{280, "DE"},
		{419, "419"},
		{626, "TL"},
		{736, "SD"},
		{840, "US"},
		{854, "BF"},
		{891, "CS"},
		{899, ""},
		{958, "AA"},
		{966, "QT"},
		{967, "EU"},
		{999, "ZZ"},
	}
	for _, tt := range fromTests {
		id, err := getRegionM49(tt.m49)
		// An error is expected exactly when the table gives no region.
		if want, have := err != nil, tt.id == ""; want != have {
			t.Errorf("error(%d): have %v; want %v", tt.m49, have, want)
			continue
		}
		r, _ := getRegionID(b(tt.id))
		if r != id {
			t.Errorf("region(%d): have %s; want %s", tt.m49, id, r)
		}
	}

	toTests := []struct {
		m49 int
		id  string
	}{
		{0, "000"},
		{0, "IC"}, // Some codes don't have an ID

		{001, "001"},
		{104, "MM"},
		{104, "BU"},
		{180, "CD"},
		{180, "ZR"},
		{231, "ET"},
		{250, "FR"},
		{249, "FX"},
		{276, "DE"},
		{278, "DD"},
		{419, "419"},
		{626, "TL"},
		{626, "TP"},
		{729, "SD"},
		{826, "GB"},
		{840, "US"},
		{854, "BF"},
		{891, "YU"},
		{891, "CS"},
		{958, "AA"},
		{966, "QT"},
		{967, "EU"},
		{967, "QU"},
		{999, "ZZ"},
		// For codes that don't have an M49 code use the replacement value,
		// if available.
		{854, "HV"}, // maps to Burkino Faso
	}
	for _, tt := range toTests {
		r, _ := getRegionID(b(tt.id))
		if r.M49() != tt.m49 {
			t.Errorf("m49(%q): have %d; want %d", tt.id, r.M49(), tt.m49)
		}
	}
}
|
||||
|
||||
// TestRegionDeprecation verifies normRegion: deprecated region codes must map
// to their canonical successor, while codes that are still current (in == out)
// must report no replacement (normRegion returns 0).
func TestRegionDeprecation(t *testing.T) {
	tests := []struct{ in, out string }{
		{"BU", "MM"},
		{"BUR", "MM"},
		{"CT", "KI"},
		{"DD", "DE"},
		{"DDR", "DE"},
		{"DY", "BJ"},
		{"FX", "FR"},
		{"HV", "BF"},
		{"JT", "UM"},
		{"MI", "UM"},
		{"NH", "VU"},
		{"NQ", "AQ"},
		{"PU", "UM"},
		{"PZ", "PA"},
		{"QU", "EU"},
		{"RH", "ZW"},
		{"TP", "TL"},
		{"UK", "GB"},
		{"VD", "VN"},
		{"WK", "UM"},
		{"YD", "YE"},
		{"NL", "NL"}, // not deprecated: expect no replacement
	}
	for _, tt := range tests {
		rIn, _ := getRegionID([]byte(tt.in))
		rOut, _ := getRegionISO2([]byte(tt.out))
		r := normRegion(rIn)
		if rOut == rIn && r != 0 {
			t.Errorf("%s: was %q; want %q", tt.in, r, tt.in)
		}
		if rOut != rIn && r != rOut {
			t.Errorf("%s: was %q; want %q", tt.in, r, tt.out)
		}

	}
}
|
||||
|
||||
// TestGetScriptID checks case-insensitive script lookup against a small
// hand-built 4-byte-entry index. Blank or unknown entries resolve to index 0
// and must also produce an error.
func TestGetScriptID(t *testing.T) {
	idx := tag.Index("0000BbbbDdddEeeeZzzz\xff\xff\xff\xff")
	tests := []struct {
		in  string
		out scriptID
	}{
		{" ", 0},
		{" ", 0},
		{" ", 0},
		{"", 0},
		{"Aaaa", 0},
		{"Bbbb", 1},
		{"Dddd", 2},
		{"dddd", 2}, // lookup is case-insensitive
		{"dDDD", 2},
		{"Eeee", 3},
		{"Zzzz", 4},
	}
	for i, tt := range tests {
		if id, err := getScriptID(idx, b(tt.in)); id != tt.out {
			t.Errorf("%d:%s: found %d; want %d", i, tt.in, id, tt.out)
		} else if id == 0 && err == nil {
			t.Errorf("%d:%s: no error; expected one", i, tt.in)
		}
	}
}
|
||||
|
||||
// TestIsPrivateUse checks IsPrivateUse for language, region, and script IDs
// against the private-use ranges defined by BCP 47 / ISO.
func TestIsPrivateUse(t *testing.T) {
	type test struct {
		s       string
		private bool
	}
	// Languages: qaa..qtz is the private-use range.
	tests := []test{
		{"en", false},
		{"und", false},
		{"pzn", false},
		{"qaa", true},
		{"qtz", true},
		{"qua", false},
	}
	for i, tt := range tests {
		x, _ := getLangID([]byte(tt.s))
		if b := x.IsPrivateUse(); b != tt.private {
			t.Errorf("%d: langID.IsPrivateUse(%s) was %v; want %v", i, tt.s, b, tt.private)
		}
	}
	// Regions: numeric 958 and up, plus selected letter ranges.
	tests = []test{
		{"001", false},
		{"419", false},
		{"899", false},
		{"900", false},
		{"957", false},
		{"958", true},
		{"AA", true},
		{"AC", false},
		{"EU", false}, // CLDR grouping, exceptionally reserved in ISO.
		{"QU", true},  // Canonicalizes to EU, User-assigned in ISO.
		{"QO", true},  // CLDR grouping, User-assigned in ISO.
		{"QA", false},
		{"QM", true},
		{"QZ", true},
		{"XA", true},
		{"XK", true}, // Assigned to Kosovo in CLDR, User-assigned in ISO.
		{"XZ", true},
		{"ZW", false},
		{"ZZ", true},
	}
	for i, tt := range tests {
		x, _ := getRegionID([]byte(tt.s))
		if b := x.IsPrivateUse(); b != tt.private {
			t.Errorf("%d: regionID.IsPrivateUse(%s) was %v; want %v", i, tt.s, b, tt.private)
		}
	}
	// Scripts: Qaaa..Qabx is the private-use range.
	tests = []test{
		{"Latn", false},
		{"Laaa", false}, // invalid
		{"Qaaa", true},
		{"Qabx", true},
		{"Qaby", false},
		{"Zyyy", false},
		{"Zzzz", false},
	}
	for i, tt := range tests {
		x, _ := getScriptID(script, []byte(tt.s))
		if b := x.IsPrivateUse(); b != tt.private {
			t.Errorf("%d: scriptID.IsPrivateUse(%s) was %v; want %v", i, tt.s, b, tt.private)
		}
	}
}
|
933
vendor/golang.org/x/text/language/match.go
generated
vendored
Normal file
933
vendor/golang.org/x/text/language/match.go
generated
vendored
Normal file
@ -0,0 +1,933 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import "errors"
|
||||
|
||||
// A MatchOption configures a Matcher.
type MatchOption func(*matcher)

// PreferSameScript will, in the absence of a match, result in the first
// preferred tag with the same script as a supported tag to match this supported
// tag. The default is currently true, but this may change in the future.
func PreferSameScript(preferSame bool) MatchOption {
	// The option simply toggles the matcher's preferSameScript flag, which is
	// consulted by Match when no supported tag matched at all.
	return func(m *matcher) { m.preferSameScript = preferSame }
}
|
||||
|
||||
// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface.
|
||||
// There doesn't seem to be too much need for multiple types.
|
||||
// Making it a concrete type allows MatchStrings to be a method, which will
|
||||
// improve its discoverability.
|
||||
|
||||
// MatchStrings parses and matches the given strings until one of them matches
// the language in the Matcher. A string may be an Accept-Language header as
// handled by ParseAcceptLanguage. The default language is returned if no
// other language matched.
func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) {
	for _, accept := range lang {
		desired, _, err := ParseAcceptLanguage(accept)
		if err != nil {
			// Skip malformed entries and try the next string.
			continue
		}
		if tag, index, conf := m.Match(desired...); conf != No {
			return tag, index
		}
	}
	// Nothing matched: fall back to the matcher's default tag.
	tag, index, _ = m.Match()
	return
}
|
||||
|
||||
// Matcher is the interface that wraps the Match method.
//
// Match returns the best match for any of the given tags, along with
// a unique index associated with the returned tag and a confidence
// score.
type Matcher interface {
	// Match matches the given desired tags, in order of preference, against
	// the matcher's supported tags.
	Match(t ...Tag) (tag Tag, index int, c Confidence)
}
|
||||
|
||||
// Comprehends reports the confidence score for a speaker of a given language
// to being able to comprehend the written form of an alternative language.
func Comprehends(speaker, alternative Tag) Confidence {
	// Matching "speaker" against a matcher whose only supported tag is
	// "alternative" yields the intelligibility confidence directly.
	_, _, c := NewMatcher([]Tag{alternative}).Match(speaker)
	return c
}
|
||||
|
||||
// NewMatcher returns a Matcher that matches an ordered list of preferred tags
// against a list of supported tags based on written intelligibility, closeness
// of dialect, equivalence of subtags and various other rules. It is initialized
// with the list of supported tags. The first element is used as the default
// value in case no match is found.
//
// Its Match method matches the first of the given Tags to reach a certain
// confidence threshold. The tags passed to Match should therefore be specified
// in order of preference. Extensions are ignored for matching.
//
// The index returned by the Match method corresponds to the index of the
// matched tag in t, but is augmented with the Unicode extension ('u') of the
// corresponding preferred tag. This allows user locale options to be passed
// transparently.
func NewMatcher(t []Tag, options ...MatchOption) Matcher {
	return newMatcher(t, options)
}
|
||||
|
||||
// Match implements Matcher. It returns the best supported tag for the given
// desired tags, the index of that tag in the supported list, and the match
// confidence. When nothing matches, the default tag is used, possibly replaced
// by a supported tag whose maximized script equals a desired tag's script
// (when preferSameScript is set). The desired tag's region (if contained in
// the result's region) and its extensions are composed into the result.
func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) {
	match, w, c := m.getBest(want...)
	if match != nil {
		t, index = match.tag, match.index
	} else {
		// TODO: this should be an option
		t = m.default_.tag
		if m.preferSameScript {
		outer:
			for _, w := range want {
				script, _ := w.Script()
				if script.scriptID == 0 {
					// Don't do anything if there is no script, such as with
					// private subtags.
					continue
				}
				// Pick the first supported tag whose maximized script matches.
				for i, h := range m.supported {
					if script.scriptID == h.maxScript {
						t, index = h.tag, i
						break outer
					}
				}
			}
		}
		// TODO: select first language tag based on script.
	}
	// Specialize a group region of the result to the desired tag's region.
	if w.region != 0 && t.region != 0 && t.region.contains(w.region) {
		t, _ = Raw.Compose(t, Region{w.region})
	}
	// Copy options from the user-provided tag into the result tag. This is hard
	// to do after the fact, so we do it here.
	// TODO: add in alternative variants to -u-va-.
	// TODO: add preferred region to -u-rg-.
	if e := w.Extensions(); len(e) > 0 {
		t, _ = Raw.Compose(t, e)
	}
	return t, index, c
}
|
||||
|
||||
// scriptRegionFlags qualifies entries in the generated likely-subtag tables
// (see addTags for how the flags are consulted).
type scriptRegionFlags uint8

const (
	isList = 1 << iota // entry refers to a list of values rather than a single value
	scriptInFrom       // presumably: script was part of the mapping's source — confirm against maketables
	regionInFrom       // presumably: region was part of the mapping's source — confirm against maketables
)
|
||||
|
||||
// setUndefinedLang sets the language subtag to id if it is currently unset.
func (t *Tag) setUndefinedLang(id langID) {
	if t.lang == 0 {
		t.lang = id
	}
}
|
||||
|
||||
// setUndefinedScript sets the script subtag to id if it is currently unset.
func (t *Tag) setUndefinedScript(id scriptID) {
	if t.script == 0 {
		t.script = id
	}
}
|
||||
|
||||
// setUndefinedRegion sets the region subtag to id if it is unset, or if the
// current region is a group that contains id (id is more specific).
func (t *Tag) setUndefinedRegion(id regionID) {
	if t.region == 0 || t.region.contains(id) {
		t.region = id
	}
}
|
||||
|
||||
// ErrMissingLikelyTagsData indicates no information was available
// to compute likely values of missing tags. It is returned by addTags and
// propagated by addLikelySubtags.
var ErrMissingLikelyTagsData = errors.New("missing likely tags data")
|
||||
|
||||
// addLikelySubtags sets subtags to their most likely value, given the locale.
// In most cases this means setting fields for unknown values, but in some
// cases it may alter a value. It returns an ErrMissingLikelyTagsData error
// if the given locale cannot be expanded.
func (t Tag) addLikelySubtags() (Tag, error) {
	id, err := addTags(t)
	if err != nil {
		return t, err
	} else if id.equalTags(t) {
		// Nothing changed: return the original (its string form is intact).
		return t, nil
	}
	// Subtags changed: rebuild the cached string representation.
	id.remakeString()
	return id, nil
}
|
||||
|
||||
// specializeRegion attempts to specialize a group region. It reports whether
// t.region was a region group; the region is only replaced when the group's
// likely language and script match t exactly.
func specializeRegion(t *Tag) bool {
	if i := regionInclusion[t.region]; i < nRegionGroups {
		x := likelyRegionGroup[i]
		if langID(x.lang) == t.lang && scriptID(x.script) == t.script {
			t.region = regionID(x.region)
		}
		return true
	}
	return false
}
|
||||
|
||||
// addTags fills in the likely language, script, and region subtags of t using
// the generated likelyLang/likelyScript/likelyRegion tables. The lookup order
// is: und-script-region, lang-script / lang-region, und-script, und-region,
// and finally lang alone. Private-use tags are returned unchanged; if no
// table entry applies, ErrMissingLikelyTagsData is returned. The cached
// string form of the returned tag is NOT updated (callers use remakeString).
func addTags(t Tag) (Tag, error) {
	// We leave private use identifiers alone.
	if t.private() {
		return t, nil
	}
	if t.script != 0 && t.region != 0 {
		if t.lang != 0 {
			// already fully specified
			specializeRegion(&t)
			return t, nil
		}
		// Search matches for und-script-region. Note that for these cases
		// region will never be a group so there is no need to check for this.
		list := likelyRegion[t.region : t.region+1]
		if x := list[0]; x.flags&isList != 0 {
			list = likelyRegionList[x.lang : x.lang+uint16(x.script)]
		}
		for _, x := range list {
			// Deviating from the spec. See match_test.go for details.
			if scriptID(x.script) == t.script {
				t.setUndefinedLang(langID(x.lang))
				return t, nil
			}
		}
	}
	if t.lang != 0 {
		// Search matches for lang-script and lang-region, where lang != und.
		if t.lang < langNoIndexOffset {
			x := likelyLang[t.lang]
			if x.flags&isList != 0 {
				list := likelyLangList[x.region : x.region+uint16(x.script)]
				if t.script != 0 {
					for _, x := range list {
						if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 {
							t.setUndefinedRegion(regionID(x.region))
							return t, nil
						}
					}
				} else if t.region != 0 {
					count := 0
					goodScript := true
					tt := t
					for _, x := range list {
						// We visit all entries for which the script was not
						// defined, including the ones where the region was not
						// defined. This allows for proper disambiguation within
						// regions.
						if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) {
							tt.region = regionID(x.region)
							tt.setUndefinedScript(scriptID(x.script))
							goodScript = goodScript && tt.script == scriptID(x.script)
							count++
						}
					}
					if count == 1 {
						return tt, nil
					}
					// Even if we fail to find a unique Region, we might have
					// an unambiguous script.
					if goodScript {
						t.script = tt.script
					}
				}
			}
		}
	} else {
		// Search matches for und-script.
		if t.script != 0 {
			x := likelyScript[t.script]
			if x.region != 0 {
				t.setUndefinedRegion(regionID(x.region))
				t.setUndefinedLang(langID(x.lang))
				return t, nil
			}
		}
		// Search matches for und-region. If und-script-region exists, it would
		// have been found earlier.
		if t.region != 0 {
			if i := regionInclusion[t.region]; i < nRegionGroups {
				// Region groups carry their own likely lang/script/region.
				x := likelyRegionGroup[i]
				if x.region != 0 {
					t.setUndefinedLang(langID(x.lang))
					t.setUndefinedScript(scriptID(x.script))
					t.region = regionID(x.region)
				}
			} else {
				x := likelyRegion[t.region]
				if x.flags&isList != 0 {
					// First entry of the list is the most likely one.
					x = likelyRegionList[x.lang]
				}
				if x.script != 0 && x.flags != scriptInFrom {
					t.setUndefinedLang(langID(x.lang))
					t.setUndefinedScript(scriptID(x.script))
					return t, nil
				}
			}
		}
	}

	// Search matches for lang.
	if t.lang < langNoIndexOffset {
		x := likelyLang[t.lang]
		if x.flags&isList != 0 {
			x = likelyLangList[x.region]
		}
		if x.region != 0 {
			t.setUndefinedScript(scriptID(x.script))
			t.setUndefinedRegion(regionID(x.region))
		}
		specializeRegion(&t)
		if t.lang == 0 {
			t.lang = _en // default language
		}
		return t, nil
	}
	return t, ErrMissingLikelyTagsData
}
|
||||
|
||||
// setTagsFrom copies the language, script, and region subtags from id into t,
// leaving all other fields of t untouched.
func (t *Tag) setTagsFrom(id Tag) {
	t.lang = id.lang
	t.script = id.script
	t.region = id.region
}
|
||||
|
||||
// minimize removes the region or script subtags from t such that
// t.addLikelySubtags() == t.minimize().addLikelySubtags().
func (t Tag) minimize() (Tag, error) {
	t, err := minimizeTags(t)
	if err != nil {
		return t, err
	}
	// Keep the cached string form in sync with the reduced subtags.
	t.remakeString()
	return t, nil
}
|
||||
|
||||
// minimizeTags mimics the behavior of the ICU 51 C implementation.
// It tries the candidate subsets lang, lang-region, and lang-script in that
// order and keeps the first one whose maximization equals that of t.
func minimizeTags(t Tag) (Tag, error) {
	if t.equalTags(und) {
		return t, nil
	}
	max, err := addTags(t)
	if err != nil {
		return t, err
	}
	for _, id := range [...]Tag{
		{lang: t.lang},
		{lang: t.lang, region: t.region},
		{lang: t.lang, script: t.script},
	} {
		if x, err := addTags(id); err == nil && max.equalTags(x) {
			t.setTagsFrom(id)
			break
		}
	}
	return t, nil
}
|
||||
|
||||
// Tag Matching
|
||||
// CLDR defines an algorithm for finding the best match between two sets of language
|
||||
// tags. The basic algorithm defines how to score a possible match and then find
|
||||
// the match with the best score
|
||||
// (see http://www.unicode.org/reports/tr35/#LanguageMatching).
|
||||
// Using scoring has several disadvantages. The scoring obfuscates the importance of
|
||||
// the various factors considered, making the algorithm harder to understand. Using
|
||||
// scoring also requires the full score to be computed for each pair of tags.
|
||||
//
|
||||
// We will use a different algorithm which aims to have the following properties:
|
||||
// - clarity on the precedence of the various selection factors, and
|
||||
// - improved performance by allowing early termination of a comparison.
|
||||
//
|
||||
// Matching algorithm (overview)
|
||||
// Input:
|
||||
// - supported: a set of supported tags
|
||||
// - default: the default tag to return in case there is no match
|
||||
// - desired: list of desired tags, ordered by preference, starting with
|
||||
// the most-preferred.
|
||||
//
|
||||
// Algorithm:
|
||||
// 1) Set the best match to the lowest confidence level
|
||||
// 2) For each tag in "desired":
|
||||
// a) For each tag in "supported":
|
||||
// 1) compute the match between the two tags.
|
||||
// 2) if the match is better than the previous best match, replace it
|
||||
// with the new match. (see next section)
|
||||
// b) if the current best match is Exact and pin is true the result will be
|
||||
// frozen to the language found thusfar, although better matches may
|
||||
// still be found for the same language.
|
||||
// 3) If the best match so far is below a certain threshold, return "default".
|
||||
//
|
||||
// Ranking:
|
||||
// We use two phases to determine whether one pair of tags are a better match
|
||||
// than another pair of tags. First, we determine a rough confidence level. If the
|
||||
// levels are different, the one with the highest confidence wins.
|
||||
// Second, if the rough confidence levels are identical, we use a set of tie-breaker
|
||||
// rules.
|
||||
//
|
||||
// The confidence level of matching a pair of tags is determined by finding the
|
||||
// lowest confidence level of any matches of the corresponding subtags (the
|
||||
// result is deemed as good as its weakest link).
|
||||
// We define the following levels:
|
||||
// Exact - An exact match of a subtag, before adding likely subtags.
|
||||
// MaxExact - An exact match of a subtag, after adding likely subtags.
|
||||
// [See Note 2].
|
||||
// High - High level of mutual intelligibility between different subtag
|
||||
// variants.
|
||||
// Low - Low level of mutual intelligibility between different subtag
|
||||
// variants.
|
||||
// No - No mutual intelligibility.
|
||||
//
|
||||
// The following levels can occur for each type of subtag:
|
||||
// Base: Exact, MaxExact, High, Low, No
|
||||
// Script: Exact, MaxExact [see Note 3], Low, No
|
||||
// Region: Exact, MaxExact, High
|
||||
// Variant: Exact, High
|
||||
// Private: Exact, No
|
||||
//
|
||||
// Any result with a confidence level of Low or higher is deemed a possible match.
|
||||
// Once a desired tag matches any of the supported tags with a level of MaxExact
|
||||
// or higher, the next desired tag is not considered (see Step 2.b).
|
||||
// Note that CLDR provides languageMatching data that defines close equivalence
|
||||
// classes for base languages, scripts and regions.
|
||||
//
|
||||
// Tie-breaking
|
||||
// If we get the same confidence level for two matches, we apply a sequence of
|
||||
// tie-breaking rules. The first that succeeds defines the result. The rules are
|
||||
// applied in the following order.
|
||||
// 1) Original language was defined and was identical.
|
||||
// 2) Original region was defined and was identical.
|
||||
// 3) Distance between two maximized regions was the smallest.
|
||||
// 4) Original script was defined and was identical.
|
||||
// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
|
||||
// If there is still no winner after these rules are applied, the first match
|
||||
// found wins.
|
||||
//
|
||||
// Notes:
|
||||
// [2] In practice, as matching of Exact is done in a separate phase from
|
||||
// matching the other levels, we reuse the Exact level to mean MaxExact in
|
||||
// the second phase. As a consequence, we only need the levels defined by
|
||||
// the Confidence type. The MaxExact confidence level is mapped to High in
|
||||
// the public API.
|
||||
// [3] We do not differentiate between maximized script values that were derived
|
||||
// from suppressScript versus most likely tag data. We determined that in
|
||||
// ranking the two, one ranks just after the other. Moreover, the two cannot
|
||||
// occur concurrently. As a consequence, they are identical for practical
|
||||
// purposes.
|
||||
// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign
|
||||
// the MaxExact level to allow iw vs he to still be a closer match than
|
||||
// en-AU vs en-US, for example.
|
||||
// [5] In CLDR a locale inherits fields that are unspecified for this locale
|
||||
// from its parent. Therefore, if a locale is a parent of another locale,
|
||||
// it is a strong measure for closeness, especially when no other tie
|
||||
// breaker rule applies. One could also argue it is inconsistent, for
|
||||
// example, when pt-AO matches pt (which CLDR equates with pt-BR), even
|
||||
// though its parent is pt-PT according to the inheritance rules.
|
||||
//
|
||||
// Implementation Details:
|
||||
// There are several performance considerations worth pointing out. Most notably,
|
||||
// we preprocess as much as possible (within reason) at the time of creation of a
|
||||
// matcher. This includes:
|
||||
// - creating a per-language map, which includes data for the raw base language
|
||||
// and its canonicalized variant (if applicable),
|
||||
// - expanding entries for the equivalence classes defined in CLDR's
|
||||
// languageMatch data.
|
||||
// The per-language map ensures that typically only a very small number of tags
|
||||
// need to be considered. The pre-expansion of canonicalized subtags and
|
||||
// equivalence classes reduces the amount of map lookups that need to be done at
|
||||
// runtime.
|
||||
|
||||
// matcher keeps a set of supported language tags, indexed by language.
type matcher struct {
	default_         *haveTag                // returned when no tag matches; derived from the first supported tag
	supported        []*haveTag              // all supported tags in their original order
	index            map[langID]*matchHeader // per-language candidate lists
	passSettings     bool
	preferSameScript bool // fall back to a supported tag with a matching maximized script
}
|
||||
|
||||
// matchHeader has the lists of tags for exact matches and matches based on
// maximized and canonicalized tags for a given language.
type matchHeader struct {
	haveTags []*haveTag
	original bool // at least one entry was added as an exact (original) supported tag
}
|
||||
|
||||
// haveTag holds a supported Tag and its maximized script and region. The maximized
// or canonicalized language is not stored as it is not needed during matching.
type haveTag struct {
	tag Tag

	// index of this tag in the original list of supported tags.
	index int

	// conf is the maximum confidence that can result from matching this haveTag.
	// When conf < Exact this means it was inserted after applying a CLDR equivalence rule.
	conf Confidence

	// Maximized region and script.
	maxRegion regionID
	maxScript scriptID

	// altScript may be checked as an alternative match to maxScript. If altScript
	// matches, the confidence level for this match is Low. Theoretically there
	// could be multiple alternative scripts. This does not occur in practice.
	altScript scriptID

	// nextMax is the index of the next haveTag with the same maximized tags.
	// A value of 0 terminates the chain (see matchHeader.addIfNew).
	nextMax uint16
}
|
||||
|
||||
// makeHaveTag builds the haveTag for a supported tag at the given index and
// returns it together with the tag's maximized (canonicalized) base language.
func makeHaveTag(tag Tag, index int) (haveTag, langID) {
	max := tag
	if tag.lang != 0 || tag.region != 0 || tag.script != 0 {
		// Canonicalize and maximize to obtain the script/region used for matching.
		max, _ = max.canonicalize(All)
		max, _ = addTags(max)
		max.remakeString()
	}
	return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang
}
|
||||
|
||||
// altScript returns an alternative script that may match the given script with
// a low confidence. At the moment, the langMatch data allows for at most one
// script to map to another and we rely on this to keep the code simple.
// It returns 0 when no alternative exists.
func altScript(l langID, s scriptID) scriptID {
	for _, alt := range matchScript {
		// TODO: also match cases where language is not the same.
		if (langID(alt.wantLang) == l || langID(alt.haveLang) == l) &&
			scriptID(alt.haveScript) == s {
			return scriptID(alt.wantScript)
		}
	}
	return 0
}
|
||||
|
||||
// addIfNew adds a haveTag to the list of tags only if it is a unique tag.
// Tags that have the same maximized values are linked by index.
func (h *matchHeader) addIfNew(n haveTag, exact bool) {
	h.original = h.original || exact
	// Don't add new exact matches.
	for _, v := range h.haveTags {
		if v.tag.equalsRest(n.tag) {
			return
		}
	}
	// Allow duplicate maximized tags, but create a linked list to allow quickly
	// comparing the equivalents and bail out.
	for i, v := range h.haveTags {
		if v.maxScript == n.maxScript &&
			v.maxRegion == n.maxRegion &&
			v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() {
			// Walk to the end of the chain (nextMax == 0 terminates) and
			// append the index n will occupy.
			for h.haveTags[i].nextMax != 0 {
				i = int(h.haveTags[i].nextMax)
			}
			h.haveTags[i].nextMax = uint16(len(h.haveTags))
			break
		}
	}
	h.haveTags = append(h.haveTags, &n)
}
|
||||
|
||||
// header returns the matchHeader for the given language. It creates one if
// it doesn't already exist.
func (m *matcher) header(l langID) *matchHeader {
	if h := m.index[l]; h != nil {
		return h
	}
	h := &matchHeader{}
	m.index[l] = h
	return h
}
|
||||
|
||||
func toConf(d uint8) Confidence {
|
||||
if d <= 10 {
|
||||
return High
|
||||
}
|
||||
if d < 30 {
|
||||
return Low
|
||||
}
|
||||
return No
|
||||
}
|
||||
|
||||
// newMatcher builds an index for the given supported tags and returns it as
// a matcher. It also expands the index by considering various equivalence classes
// for a given tag.
func newMatcher(supported []Tag, options []MatchOption) *matcher {
	m := &matcher{
		index:            make(map[langID]*matchHeader),
		preferSameScript: true,
	}
	// Apply caller-supplied options (e.g. PreferSameScript) before indexing.
	for _, o := range options {
		o(m)
	}
	if len(supported) == 0 {
		// With nothing to match against, fall back to a zero haveTag.
		m.default_ = &haveTag{}
		return m
	}
	// Add supported languages to the index. Add exact matches first to give
	// them precedence.
	for i, tag := range supported {
		pair, _ := makeHaveTag(tag, i)
		m.header(tag.lang).addIfNew(pair, true)
		m.supported = append(m.supported, &pair)
	}
	// The first supported tag is the default result when nothing matches.
	m.default_ = m.header(supported[0].lang).haveTags[0]
	// Keep these in two different loops to support the case that two equivalent
	// languages are distinguished, such as iw and he.
	for i, tag := range supported {
		pair, max := makeHaveTag(tag, i)
		if max != tag.lang {
			// Also index the tag under its maximized base language.
			m.header(max).addIfNew(pair, true)
		}
	}

	// update is used to add indexes in the map for equivalent languages.
	// update will only add entries to original indexes, thus not computing any
	// transitive relations.
	update := func(want, have uint16, conf Confidence) {
		if hh := m.index[langID(have)]; hh != nil {
			if !hh.original {
				return
			}
			hw := m.header(langID(want))
			for _, ht := range hh.haveTags {
				v := *ht
				if conf < v.conf {
					v.conf = conf
				}
				v.nextMax = 0 // this value needs to be recomputed
				if v.altScript != 0 {
					v.altScript = altScript(langID(want), v.maxScript)
				}
				hw.addIfNew(v, conf == Exact && hh.original)
			}
		}
	}

	// Add entries for languages with mutual intelligibility as defined by CLDR's
	// languageMatch data.
	for _, ml := range matchLang {
		update(ml.want, ml.have, toConf(ml.distance))
		if !ml.oneway {
			update(ml.have, ml.want, toConf(ml.distance))
		}
	}

	// Add entries for possible canonicalizations. This is an optimization to
	// ensure that only one map lookup needs to be done at runtime per desired tag.
	// First we match deprecated equivalents. If they are perfect equivalents
	// (their canonicalization simply substitutes a different language code, but
	// nothing else), the match confidence is Exact, otherwise it is High.
	for i, lm := range langAliasMap {
		// If deprecated codes match and there is no fiddling with the script
		// or region, we consider it an exact match.
		conf := Exact
		if langAliasTypes[i] != langMacro {
			if !isExactEquivalent(langID(lm.from)) {
				conf = High
			}
			update(lm.to, lm.from, conf)
		}
		update(lm.from, lm.to, conf)
	}
	return m
}
|
||||
|
||||
// getBest gets the best matching tag in m for any of the given tags, taking into
// account the order of preference of the given tags.
func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) {
	best := bestMatch{}
	for i, w := range want {
		var max Tag
		// Check for exact match first.
		h := m.index[w.lang]
		if w.lang != 0 {
			if h == nil {
				continue
			}
			// Base language is defined.
			max, _ = w.canonicalize(Legacy | Deprecated | Macro)
			// A region that is added through canonicalization is stronger than
			// a maximized region: set it in the original (e.g. mo -> ro-MD).
			if w.region != max.region {
				w.region = max.region
			}
			// TODO: should we do the same for scripts?
			// See test case: en, sr, nl ; sh ; sr
			max, _ = addTags(max)
		} else {
			// Base language is not defined.
			if h != nil {
				// Try an exact match on the script/region/variant remainder.
				for i := range h.haveTags {
					have := h.haveTags[i]
					if have.tag.equalsRest(w) {
						return have, w, Exact
					}
				}
			}
			if w.script == 0 && w.region == 0 {
				// We skip all tags matching und for approximate matching, including
				// private tags.
				continue
			}
			max, _ = addTags(w)
			if h = m.index[max.lang]; h == nil {
				continue
			}
		}
		// Only pin this language if it does not occur again later in the
		// preference list (e.g. "en, nl, en-GB" keeps en unpinned).
		pin := true
		for _, t := range want[i+1:] {
			if w.lang == t.lang {
				pin = false
				break
			}
		}
		// Check for match based on maximized tag.
		for i := range h.haveTags {
			have := h.haveTags[i]
			best.update(have, w, max.script, max.region, pin)
			if best.conf == Exact {
				// Walk the linked list of entries sharing the same maximized
				// tag to let tie-breaking pick the best of the equivalents.
				for have.nextMax != 0 {
					have = h.haveTags[have.nextMax]
					best.update(have, w, max.script, max.region, pin)
				}
				return best.have, best.want, best.conf
			}
		}
	}
	if best.conf <= No {
		// No usable match; report the first desired tag, if any, with No.
		if len(want) != 0 {
			return nil, want[0], No
		}
		return nil, Tag{}, No
	}
	return best.have, best.want, best.conf
}
|
||||
|
||||
// bestMatch accumulates the best match so far.
type bestMatch struct {
	have            *haveTag   // supported tag of the best match so far
	want            Tag        // desired tag that produced the match
	conf            Confidence // rough confidence level of the match
	pinnedRegion    regionID   // maximized region of the current best match
	pinLanguage     bool       // when set, only this language is considered further
	sameRegionGroup bool       // whether the matched regions shared a CLDR region group
	// Cached results from applying tie-breaking rules.
	origLang     bool  // pre-maximized languages were specified and identical
	origReg      bool  // pre-maximized regions were specified and identical
	paradigmReg  bool  // matched region is a paradigm locale for the language
	regGroupDist uint8 // CLDR region-group distance of the match
	origScript   bool  // pre-maximized scripts were specified and identical
}
|
||||
|
||||
// update updates the existing best match if the new pair is considered to be a
// better match. To determine if the given pair is a better match, it first
// computes the rough confidence level. If this surpasses the current match, it
// will replace it and update the tie-breaker rule cache. If there is a tie, it
// proceeds with applying a series of tie-breaker rules. If there is no
// conclusive winner after applying the tie-breaker rules, it leaves the current
// match as the preferred match.
//
// If pin is true and have and tag are a strong match, it will henceforth only
// consider matches for this language. This corresponds to the notion that most
// users have a strong preference for the first defined language. A user can
// still prefer a second language over a dialect of the preferred language by
// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should
// be false.
func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID, pin bool) {
	// Bail if the maximum attainable confidence is below that of the current best match.
	c := have.conf
	if c < m.conf {
		return
	}
	// Don't change the language once we already have found an exact match.
	if m.pinLanguage && tag.lang != m.want.lang {
		return
	}
	// Pin the region group if we are comparing tags for the same language.
	if tag.lang == m.want.lang && m.sameRegionGroup {
		_, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.lang)
		if !sameGroup {
			return
		}
	}
	if c == Exact && have.maxScript == maxScript {
		// If there is another language and then another entry of this language,
		// don't pin anything, otherwise pin the language.
		m.pinLanguage = pin
	}
	if have.tag.equalsRest(tag) {
		// Everything but the language matches exactly: keep c as-is.
	} else if have.maxScript != maxScript {
		// There is usually very little comprehension between different scripts.
		// In a few cases there may still be Low comprehension. This possibility
		// is pre-computed and stored in have.altScript.
		if Low < m.conf || have.altScript != maxScript {
			return
		}
		c = Low
	} else if have.maxRegion != maxRegion {
		if High < c {
			// There is usually a small difference between languages across regions.
			c = High
		}
	}

	// We store the results of the computations of the tie-breaker rules along
	// with the best match. There is no need to do the checks once we determine
	// we have a winner, but we do still need to do the tie-breaker computations.
	// We use "beaten" to keep track if we still need to do the checks.
	beaten := false // true if the new pair defeats the current one.
	if c != m.conf {
		if c < m.conf {
			return
		}
		beaten = true
	}

	// Tie-breaker rules:
	// We prefer if the pre-maximized language was specified and identical.
	origLang := have.tag.lang == tag.lang && tag.lang != 0
	if !beaten && m.origLang != origLang {
		if m.origLang {
			return
		}
		beaten = true
	}

	// We prefer if the pre-maximized region was specified and identical.
	origReg := have.tag.region == tag.region && tag.region != 0
	if !beaten && m.origReg != origReg {
		if m.origReg {
			return
		}
		beaten = true
	}

	// A smaller CLDR region-group distance is a better match.
	regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.lang)
	if !beaten && m.regGroupDist != regGroupDist {
		if regGroupDist > m.regGroupDist {
			return
		}
		beaten = true
	}

	// A paradigm locale (e.g. en-US) is preferred over other regions.
	paradigmReg := isParadigmLocale(tag.lang, have.maxRegion)
	if !beaten && m.paradigmReg != paradigmReg {
		if !paradigmReg {
			return
		}
		beaten = true
	}

	// Next we prefer if the pre-maximized script was specified and identical.
	origScript := have.tag.script == tag.script && tag.script != 0
	if !beaten && m.origScript != origScript {
		if m.origScript {
			return
		}
		beaten = true
	}

	// Update m to the newly found best match.
	if beaten {
		m.have = have
		m.want = tag
		m.conf = c
		m.pinnedRegion = maxRegion
		m.sameRegionGroup = sameGroup
		m.origLang = origLang
		m.origReg = origReg
		m.paradigmReg = paradigmReg
		m.origScript = origScript
		m.regGroupDist = regGroupDist
	}
}
|
||||
|
||||
func isParadigmLocale(lang langID, r regionID) bool {
|
||||
for _, e := range paradigmLocales {
|
||||
if langID(e[0]) == lang && (r == regionID(e[1]) || r == regionID(e[2])) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// regionGroupDist computes the distance between two regions based on their
// CLDR grouping.
func regionGroupDist(a, b regionID, script scriptID, lang langID) (dist uint8, same bool) {
	const defaultDistance = 4

	// regionToGroups is a per-region bit set of group memberships; the shift
	// presumably aligns it with the 1-based group numbering used below —
	// TODO confirm against the table generator.
	aGroup := uint(regionToGroups[a]) << 1
	bGroup := uint(regionToGroups[b]) << 1
	for _, ri := range matchRegion {
		// Rules are filtered by language and, when specified, by script.
		if langID(ri.lang) == lang && (ri.script == 0 || scriptID(ri.script) == script) {
			// The high bit of ri.group marks a negated ("not in group") rule.
			group := uint(1 << (ri.group &^ 0x80))
			if 0x80&ri.group == 0 {
				if aGroup&bGroup&group != 0 { // Both regions are in the group.
					return ri.distance, ri.distance == defaultDistance
				}
			} else {
				if (aGroup|bGroup)&group == 0 { // Both regions are not in the group.
					return ri.distance, ri.distance == defaultDistance
				}
			}
		}
	}
	return defaultDistance, true
}
|
||||
|
||||
func (t Tag) variants() string {
|
||||
if t.pVariant == 0 {
|
||||
return ""
|
||||
}
|
||||
return t.str[t.pVariant:t.pExt]
|
||||
}
|
||||
|
||||
// variantOrPrivateTagStr returns variants or private use tags.
|
||||
func (t Tag) variantOrPrivateTagStr() string {
|
||||
if t.pExt > 0 {
|
||||
return t.str[t.pVariant:t.pExt]
|
||||
}
|
||||
return t.str[t.pVariant:]
|
||||
}
|
||||
|
||||
// equalsRest compares everything except the language.
|
||||
func (a Tag) equalsRest(b Tag) bool {
|
||||
// TODO: don't include extensions in this comparison. To do this efficiently,
|
||||
// though, we should handle private tags separately.
|
||||
return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr()
|
||||
}
|
||||
|
||||
// isExactEquivalent returns true if canonicalizing the language will not alter
|
||||
// the script or region of a tag.
|
||||
func isExactEquivalent(l langID) bool {
|
||||
for _, o := range notEquivalent {
|
||||
if o == l {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var notEquivalent []langID
|
||||
|
||||
// init precomputes the notEquivalent list consulted by isExactEquivalent and
// fills in missing regions of the paradigmLocales table.
func init() {
	// Create a list of all languages for which canonicalization may alter the
	// script or region.
	for _, lm := range langAliasMap {
		tag := Tag{lang: langID(lm.from)}
		if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 {
			notEquivalent = append(notEquivalent, langID(lm.from))
		}
	}
	// Maximize undefined regions of paradigm locales.
	for i, v := range paradigmLocales {
		max, _ := addTags(Tag{lang: langID(v[0])})
		if v[1] == 0 {
			paradigmLocales[i][1] = uint16(max.region)
		}
		if v[2] == 0 {
			paradigmLocales[i][2] = uint16(max.region)
		}
	}
}
|
505
vendor/golang.org/x/text/language/match_test.go
generated
vendored
Normal file
505
vendor/golang.org/x/text/language/match_test.go
generated
vendored
Normal file
@ -0,0 +1,505 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/text/internal/testtext"
|
||||
"golang.org/x/text/internal/ucd"
|
||||
)
|
||||
|
||||
var verbose = flag.Bool("verbose", false, "set to true to print the internal tables of matchers")
|
||||
|
||||
func TestCompliance(t *testing.T) {
|
||||
filepath.Walk("testdata", func(file string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
r, err := os.Open(file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ucd.Parse(r, func(p *ucd.Parser) {
|
||||
name := strings.Replace(path.Join(p.String(0), p.String(1)), " ", "", -1)
|
||||
if skip[name] {
|
||||
return
|
||||
}
|
||||
t.Run(info.Name()+"/"+name, func(t *testing.T) {
|
||||
supported := makeTagList(p.String(0))
|
||||
desired := makeTagList(p.String(1))
|
||||
gotCombined, index, conf := NewMatcher(supported).Match(desired...)
|
||||
|
||||
gotMatch := supported[index]
|
||||
wantMatch := mk(p.String(2))
|
||||
if gotMatch != wantMatch {
|
||||
t.Fatalf("match: got %q; want %q (%v)", gotMatch, wantMatch, conf)
|
||||
}
|
||||
wantCombined, err := Raw.Parse(p.String(3))
|
||||
if err == nil && gotCombined != wantCombined {
|
||||
t.Errorf("combined: got %q; want %q (%v)", gotCombined, wantCombined, conf)
|
||||
}
|
||||
})
|
||||
})
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
var skip = map[string]bool{
|
||||
// TODO: bugs
|
||||
// Honor the wildcard match. This may only be useful to select non-exact
|
||||
// stuff.
|
||||
"mul,af/nl": true, // match: got "af"; want "mul"
|
||||
|
||||
// TODO: include other extensions.
|
||||
// combined: got "en-GB-u-ca-buddhist-nu-arab"; want "en-GB-fonipa-t-m0-iso-i0-pinyin-u-ca-buddhist-nu-arab"
|
||||
"und,en-GB-u-sd-gbsct/en-fonipa-u-nu-Arab-ca-buddhist-t-m0-iso-i0-pinyin": true,
|
||||
|
||||
// Inconsistencies with Mark Davis' implementation where it is not clear
|
||||
// which is better.
|
||||
|
||||
// Inconsistencies in combined. I think the Go approach is more appropriate.
|
||||
// We could use -u-rg- and -u-va- as alternative.
|
||||
"und,fr/fr-BE-fonipa": true, // combined: got "fr"; want "fr-BE-fonipa"
|
||||
"und,fr-CA/fr-BE-fonipa": true, // combined: got "fr-CA"; want "fr-BE-fonipa"
|
||||
"und,fr-fonupa/fr-BE-fonipa": true, // combined: got "fr-fonupa"; want "fr-BE-fonipa"
|
||||
"und,no/nn-BE-fonipa": true, // combined: got "no"; want "no-BE-fonipa"
|
||||
"50,und,fr-CA-fonupa/fr-BE-fonipa": true, // combined: got "fr-CA-fonupa"; want "fr-BE-fonipa"
|
||||
|
||||
// The initial number is a threshold. As we don't use scoring, we will not
|
||||
// implement this.
|
||||
"50,und,fr-Cyrl-CA-fonupa/fr-BE-fonipa": true,
|
||||
// match: got "und"; want "fr-Cyrl-CA-fonupa"
|
||||
// combined: got "und"; want "fr-Cyrl-BE-fonipa"
|
||||
|
||||
// Other interesting cases to test:
|
||||
// - Should same language or same script have the preference if there is
|
||||
// usually no understanding of the other script?
|
||||
// - More specific region in desired may replace enclosing supported.
|
||||
}
|
||||
|
||||
func makeTagList(s string) (tags []Tag) {
|
||||
for _, s := range strings.Split(s, ",") {
|
||||
tags = append(tags, mk(strings.TrimSpace(s)))
|
||||
}
|
||||
return tags
|
||||
}
|
||||
|
||||
func TestMatchStrings(t *testing.T) {
|
||||
testCases := []struct {
|
||||
supported string
|
||||
desired string // strings separted by |
|
||||
tag string
|
||||
index int
|
||||
}{{
|
||||
supported: "en",
|
||||
desired: "",
|
||||
tag: "en",
|
||||
index: 0,
|
||||
}, {
|
||||
supported: "en",
|
||||
desired: "nl",
|
||||
tag: "en",
|
||||
index: 0,
|
||||
}, {
|
||||
supported: "en,nl",
|
||||
desired: "nl",
|
||||
tag: "nl",
|
||||
index: 1,
|
||||
}, {
|
||||
supported: "en,nl",
|
||||
desired: "nl|en",
|
||||
tag: "nl",
|
||||
index: 1,
|
||||
}, {
|
||||
supported: "en-GB,nl",
|
||||
desired: "en ; q=0.1,nl",
|
||||
tag: "nl",
|
||||
index: 1,
|
||||
}, {
|
||||
supported: "en-GB,nl",
|
||||
desired: "en;q=0.005 | dk; q=0.1,nl ",
|
||||
tag: "en-GB",
|
||||
index: 0,
|
||||
}, {
|
||||
// do not match faulty tags with und
|
||||
supported: "en,und",
|
||||
desired: "|en",
|
||||
tag: "en",
|
||||
index: 0,
|
||||
}}
|
||||
for _, tc := range testCases {
|
||||
t.Run(path.Join(tc.supported, tc.desired), func(t *testing.T) {
|
||||
m := NewMatcher(makeTagList(tc.supported))
|
||||
tag, index := MatchStrings(m, strings.Split(tc.desired, "|")...)
|
||||
if tag.String() != tc.tag || index != tc.index {
|
||||
t.Errorf("got %v, %d; want %v, %d", tag, index, tc.tag, tc.index)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddLikelySubtags(t *testing.T) {
|
||||
tests := []struct{ in, out string }{
|
||||
{"aa", "aa-Latn-ET"},
|
||||
{"aa-Latn", "aa-Latn-ET"},
|
||||
{"aa-Arab", "aa-Arab-ET"},
|
||||
{"aa-Arab-ER", "aa-Arab-ER"},
|
||||
{"kk", "kk-Cyrl-KZ"},
|
||||
{"kk-CN", "kk-Arab-CN"},
|
||||
{"cmn", "cmn"},
|
||||
{"zh-AU", "zh-Hant-AU"},
|
||||
{"zh-VN", "zh-Hant-VN"},
|
||||
{"zh-SG", "zh-Hans-SG"},
|
||||
{"zh-Hant", "zh-Hant-TW"},
|
||||
{"zh-Hani", "zh-Hani-CN"},
|
||||
{"und-Hani", "zh-Hani-CN"},
|
||||
{"und", "en-Latn-US"},
|
||||
{"und-GB", "en-Latn-GB"},
|
||||
{"und-CW", "pap-Latn-CW"},
|
||||
{"und-YT", "fr-Latn-YT"},
|
||||
{"und-Arab", "ar-Arab-EG"},
|
||||
{"und-AM", "hy-Armn-AM"},
|
||||
{"und-TW", "zh-Hant-TW"},
|
||||
{"und-002", "en-Latn-NG"},
|
||||
{"und-Latn-002", "en-Latn-NG"},
|
||||
{"en-Latn-002", "en-Latn-NG"},
|
||||
{"en-002", "en-Latn-NG"},
|
||||
{"en-001", "en-Latn-US"},
|
||||
{"und-003", "en-Latn-US"},
|
||||
{"und-GB", "en-Latn-GB"},
|
||||
{"Latn-001", "en-Latn-US"},
|
||||
{"en-001", "en-Latn-US"},
|
||||
{"es-419", "es-Latn-419"},
|
||||
{"he-145", "he-Hebr-IL"},
|
||||
{"ky-145", "ky-Latn-TR"},
|
||||
{"kk", "kk-Cyrl-KZ"},
|
||||
// Don't specialize duplicate and ambiguous matches.
|
||||
{"kk-034", "kk-Arab-034"}, // Matches IR and AF. Both are Arab.
|
||||
{"ku-145", "ku-Latn-TR"}, // Matches IQ, TR, and LB, but kk -> TR.
|
||||
{"und-Arab-CC", "ms-Arab-CC"},
|
||||
{"und-Arab-GB", "ks-Arab-GB"},
|
||||
{"und-Hans-CC", "zh-Hans-CC"},
|
||||
{"und-CC", "en-Latn-CC"},
|
||||
{"sr", "sr-Cyrl-RS"},
|
||||
{"sr-151", "sr-Latn-151"}, // Matches RO and RU.
|
||||
// We would like addLikelySubtags to generate the same results if the input
|
||||
// only changes by adding tags that would otherwise have been added
|
||||
// by the expansion.
|
||||
// In other words:
|
||||
// und-AA -> xx-Scrp-AA implies und-Scrp-AA -> xx-Scrp-AA
|
||||
// und-AA -> xx-Scrp-AA implies xx-AA -> xx-Scrp-AA
|
||||
// und-Scrp -> xx-Scrp-AA implies und-Scrp-AA -> xx-Scrp-AA
|
||||
// und-Scrp -> xx-Scrp-AA implies xx-Scrp -> xx-Scrp-AA
|
||||
// xx -> xx-Scrp-AA implies xx-Scrp -> xx-Scrp-AA
|
||||
// xx -> xx-Scrp-AA implies xx-AA -> xx-Scrp-AA
|
||||
//
|
||||
// The algorithm specified in
|
||||
// http://unicode.org/reports/tr35/tr35-9.html#Supplemental_Data,
|
||||
// Section C.10, does not handle the first case. For example,
|
||||
// the CLDR data contains an entry und-BJ -> fr-Latn-BJ, but not
|
||||
// there is no rule for und-Latn-BJ. According to spec, und-Latn-BJ
|
||||
// would expand to en-Latn-BJ, violating the aforementioned principle.
|
||||
// We deviate from the spec by letting und-Scrp-AA expand to xx-Scrp-AA
|
||||
// if a rule of the form und-AA -> xx-Scrp-AA is defined.
|
||||
// Note that as of version 23, CLDR has some explicitly specified
|
||||
// entries that do not conform to these rules. The implementation
|
||||
// will not correct these explicit inconsistencies. A later versions of CLDR
|
||||
// is supposed to fix this.
|
||||
{"und-Latn-BJ", "fr-Latn-BJ"},
|
||||
{"und-Bugi-ID", "bug-Bugi-ID"},
|
||||
// regions, scripts and languages without definitions
|
||||
{"und-Arab-AA", "ar-Arab-AA"},
|
||||
{"und-Afak-RE", "fr-Afak-RE"},
|
||||
{"und-Arab-GB", "ks-Arab-GB"},
|
||||
{"abp-Arab-GB", "abp-Arab-GB"},
|
||||
// script has preference over region
|
||||
{"und-Arab-NL", "ar-Arab-NL"},
|
||||
{"zza", "zza-Latn-TR"},
|
||||
// preserve variants and extensions
|
||||
{"de-1901", "de-Latn-DE-1901"},
|
||||
{"de-x-abc", "de-Latn-DE-x-abc"},
|
||||
{"de-1901-x-abc", "de-Latn-DE-1901-x-abc"},
|
||||
{"x-abc", "x-abc"}, // TODO: is this the desired behavior?
|
||||
}
|
||||
for i, tt := range tests {
|
||||
in, _ := Parse(tt.in)
|
||||
out, _ := Parse(tt.out)
|
||||
in, _ = in.addLikelySubtags()
|
||||
if in.String() != out.String() {
|
||||
t.Errorf("%d: add(%s) was %s; want %s", i, tt.in, in, tt.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestMinimize(t *testing.T) {
|
||||
tests := []struct{ in, out string }{
|
||||
{"aa", "aa"},
|
||||
{"aa-Latn", "aa"},
|
||||
{"aa-Latn-ET", "aa"},
|
||||
{"aa-ET", "aa"},
|
||||
{"aa-Arab", "aa-Arab"},
|
||||
{"aa-Arab-ER", "aa-Arab-ER"},
|
||||
{"aa-Arab-ET", "aa-Arab"},
|
||||
{"und", "und"},
|
||||
{"und-Latn", "und"},
|
||||
{"und-Latn-US", "und"},
|
||||
{"en-Latn-US", "en"},
|
||||
{"cmn", "cmn"},
|
||||
{"cmn-Hans", "cmn-Hans"},
|
||||
{"cmn-Hant", "cmn-Hant"},
|
||||
{"zh-AU", "zh-AU"},
|
||||
{"zh-VN", "zh-VN"},
|
||||
{"zh-SG", "zh-SG"},
|
||||
{"zh-Hant", "zh-Hant"},
|
||||
{"zh-Hant-TW", "zh-TW"},
|
||||
{"zh-Hans", "zh"},
|
||||
{"zh-Hani", "zh-Hani"},
|
||||
{"und-Hans", "und-Hans"},
|
||||
{"und-Hani", "und-Hani"},
|
||||
|
||||
{"und-CW", "und-CW"},
|
||||
{"und-YT", "und-YT"},
|
||||
{"und-Arab", "und-Arab"},
|
||||
{"und-AM", "und-AM"},
|
||||
{"und-Arab-CC", "und-Arab-CC"},
|
||||
{"und-CC", "und-CC"},
|
||||
{"und-Latn-BJ", "und-BJ"},
|
||||
{"und-Bugi-ID", "und-Bugi"},
|
||||
{"bug-Bugi-ID", "bug-Bugi"},
|
||||
// regions, scripts and languages without definitions
|
||||
{"und-Arab-AA", "und-Arab-AA"},
|
||||
// preserve variants and extensions
|
||||
{"de-Latn-1901", "de-1901"},
|
||||
{"de-Latn-x-abc", "de-x-abc"},
|
||||
{"de-DE-1901-x-abc", "de-1901-x-abc"},
|
||||
{"x-abc", "x-abc"}, // TODO: is this the desired behavior?
|
||||
}
|
||||
for i, tt := range tests {
|
||||
in, _ := Parse(tt.in)
|
||||
out, _ := Parse(tt.out)
|
||||
min, _ := in.minimize()
|
||||
if min.String() != out.String() {
|
||||
t.Errorf("%d: min(%s) was %s; want %s", i, tt.in, min, tt.out)
|
||||
}
|
||||
max, _ := min.addLikelySubtags()
|
||||
if x, _ := in.addLikelySubtags(); x.String() != max.String() {
|
||||
t.Errorf("%d: max(min(%s)) = %s; want %s", i, tt.in, max, x)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegionGroups(t *testing.T) {
|
||||
testCases := []struct {
|
||||
a, b string
|
||||
distance uint8
|
||||
}{
|
||||
{"zh-TW", "zh-HK", 5},
|
||||
{"zh-MO", "zh-HK", 4},
|
||||
{"es-ES", "es-AR", 5},
|
||||
{"es-ES", "es", 4},
|
||||
{"es-419", "es-MX", 4},
|
||||
{"es-AR", "es-MX", 4},
|
||||
{"es-ES", "es-MX", 5},
|
||||
{"es-PT", "es-MX", 5},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
a := MustParse(tc.a)
|
||||
aScript, _ := a.Script()
|
||||
b := MustParse(tc.b)
|
||||
bScript, _ := b.Script()
|
||||
|
||||
if aScript != bScript {
|
||||
t.Errorf("scripts differ: %q vs %q", aScript, bScript)
|
||||
continue
|
||||
}
|
||||
d, _ := regionGroupDist(a.region, b.region, aScript.scriptID, a.lang)
|
||||
if d != tc.distance {
|
||||
t.Errorf("got %q; want %q", d, tc.distance)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsParadigmLocale(t *testing.T) {
|
||||
testCases := map[string]bool{
|
||||
"en-US": true,
|
||||
"en-GB": true,
|
||||
"en-VI": false,
|
||||
"es-GB": false,
|
||||
"es-ES": true,
|
||||
"es-419": true,
|
||||
}
|
||||
for str, want := range testCases {
|
||||
tag := Make(str)
|
||||
got := isParadigmLocale(tag.lang, tag.region)
|
||||
if got != want {
|
||||
t.Errorf("isPL(%q) = %v; want %v", str, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Implementation of String methods for various types for debugging purposes.
|
||||
|
||||
func (m *matcher) String() string {
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprintln(w, "Default:", m.default_)
|
||||
for tag, h := range m.index {
|
||||
fmt.Fprintf(w, " %s: %v\n", tag, h)
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func (h *matchHeader) String() string {
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprint(w, "haveTag: ")
|
||||
for _, h := range h.haveTags {
|
||||
fmt.Fprintf(w, "%v, ", h)
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func (t haveTag) String() string {
|
||||
return fmt.Sprintf("%v:%d:%v:%v-%v|%v", t.tag, t.index, t.conf, t.maxRegion, t.maxScript, t.altScript)
|
||||
}
|
||||
|
||||
func TestBestMatchAlloc(t *testing.T) {
|
||||
m := NewMatcher(makeTagList("en sr nl"))
|
||||
// Go allocates when creating a list of tags from a single tag!
|
||||
list := []Tag{English}
|
||||
avg := testtext.AllocsPerRun(1, func() {
|
||||
m.Match(list...)
|
||||
})
|
||||
if avg > 0 {
|
||||
t.Errorf("got %f; want 0", avg)
|
||||
}
|
||||
}
|
||||
|
||||
var benchHave = []Tag{
|
||||
mk("en"),
|
||||
mk("en-GB"),
|
||||
mk("za"),
|
||||
mk("zh-Hant"),
|
||||
mk("zh-Hans-CN"),
|
||||
mk("zh"),
|
||||
mk("zh-HK"),
|
||||
mk("ar-MK"),
|
||||
mk("en-CA"),
|
||||
mk("fr-CA"),
|
||||
mk("fr-US"),
|
||||
mk("fr-CH"),
|
||||
mk("fr"),
|
||||
mk("lt"),
|
||||
mk("lv"),
|
||||
mk("iw"),
|
||||
mk("iw-NL"),
|
||||
mk("he"),
|
||||
mk("he-IT"),
|
||||
mk("tlh"),
|
||||
mk("ja"),
|
||||
mk("ja-Jpan"),
|
||||
mk("ja-Jpan-JP"),
|
||||
mk("de"),
|
||||
mk("de-CH"),
|
||||
mk("de-AT"),
|
||||
mk("de-DE"),
|
||||
mk("sr"),
|
||||
mk("sr-Latn"),
|
||||
mk("sr-Cyrl"),
|
||||
mk("sr-ME"),
|
||||
}
|
||||
|
||||
var benchWant = [][]Tag{
|
||||
[]Tag{
|
||||
mk("en"),
|
||||
},
|
||||
[]Tag{
|
||||
mk("en-AU"),
|
||||
mk("de-HK"),
|
||||
mk("nl"),
|
||||
mk("fy"),
|
||||
mk("lv"),
|
||||
},
|
||||
[]Tag{
|
||||
mk("en-AU"),
|
||||
mk("de-HK"),
|
||||
mk("nl"),
|
||||
mk("fy"),
|
||||
},
|
||||
[]Tag{
|
||||
mk("ja-Hant"),
|
||||
mk("da-HK"),
|
||||
mk("nl"),
|
||||
mk("zh-TW"),
|
||||
},
|
||||
[]Tag{
|
||||
mk("ja-Hant"),
|
||||
mk("da-HK"),
|
||||
mk("nl"),
|
||||
mk("hr"),
|
||||
},
|
||||
}
|
||||
|
||||
func BenchmarkMatch(b *testing.B) {
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, want := range benchWant {
|
||||
m.getBest(want...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchExact(b *testing.B) {
|
||||
want := mk("en")
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchAltLanguagePresent(b *testing.B) {
|
||||
want := mk("hr")
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchAltLanguageNotPresent(b *testing.B) {
|
||||
want := mk("nn")
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchAltScriptPresent(b *testing.B) {
|
||||
want := mk("zh-Hant-CN")
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchAltScriptNotPresent(b *testing.B) {
|
||||
want := mk("fr-Cyrl")
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMatchLimitedExact(b *testing.B) {
|
||||
want := []Tag{mk("he-NL"), mk("iw-NL")}
|
||||
m := newMatcher(benchHave, nil)
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.getBest(want...)
|
||||
}
|
||||
}
|
859
vendor/golang.org/x/text/language/parse.go
generated
vendored
Normal file
859
vendor/golang.org/x/text/language/parse.go
generated
vendored
Normal file
@ -0,0 +1,859 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// isAlpha reports whether b is an ASCII letter rather than a digit.
// b must be an ASCII letter or digit.
func isAlpha(b byte) bool {
	// Digits '0'-'9' sort below 'A'; letters sort at or above it.
	return b >= 'A'
}
|
||||
|
||||
// isAlphaNum reports whether s consists solely of ASCII letters and digits.
func isAlphaNum(s []byte) bool {
	for _, c := range s {
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			// ok
		default:
			return false
		}
	}
	return true
}
|
||||
|
||||
// errSyntax is returned by any of the parsing functions when the
|
||||
// input is not well-formed, according to BCP 47.
|
||||
// TODO: return the position at which the syntax error occurred?
|
||||
var errSyntax = errors.New("language: tag is not well-formed")
|
||||
|
||||
// ValueError is returned by any of the parsing functions when the
// input is well-formed but the respective subtag is not recognized
// as a valid value.
type ValueError struct {
	v [8]byte // the offending subtag, NUL-padded to 8 bytes
}

// mkErrInvalid wraps the given subtag bytes in a ValueError.
func mkErrInvalid(s []byte) error {
	e := ValueError{}
	copy(e.v[:], s)
	return e
}

// tag returns the stored subtag with any NUL padding stripped.
func (e ValueError) tag() []byte {
	if n := bytes.IndexByte(e.v[:], 0); n >= 0 {
		return e.v[:n]
	}
	return e.v[:]
}

// Error implements the error interface.
func (e ValueError) Error() string {
	return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
}

// Subtag returns the subtag for which the error occurred.
func (e ValueError) Subtag() string {
	return string(e.tag())
}
|
||||
|
||||
// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
type scanner struct {
	b     []byte                      // input being scanned; may alias bytes for small inputs
	bytes [max99thPercentileSize]byte // inline storage to avoid allocating for typical tags
	token []byte                      // the current token
	start int                         // start position of the current token
	end   int                         // end position of the current token
	next  int                         // next point for scan
	err   error                       // first (most severe) error recorded via setError
	done  bool                        // set when the input has been exhausted
}
|
||||
|
||||
// makeScannerString returns a scanner for s. Inputs that fit in the
// scanner's fixed-size buffer are copied into it to avoid a heap
// allocation; larger inputs are converted to a fresh byte slice.
func makeScannerString(s string) scanner {
	scan := scanner{}
	if len(s) <= len(scan.bytes) {
		scan.b = scan.bytes[:copy(scan.bytes[:], s)]
	} else {
		scan.b = []byte(s)
	}
	scan.init()
	return scan
}
|
||||
|
||||
// makeScanner returns a scanner using b as the input buffer.
|
||||
// b is not copied and may be modified by the scanner routines.
|
||||
func makeScanner(b []byte) scanner {
|
||||
scan := scanner{b: b}
|
||||
scan.init()
|
||||
return scan
|
||||
}
|
||||
|
||||
func (s *scanner) init() {
|
||||
for i, c := range s.b {
|
||||
if c == '_' {
|
||||
s.b[i] = '-'
|
||||
}
|
||||
}
|
||||
s.scan()
|
||||
}
|
||||
|
||||
// restToLower converts the string between start and end to lower case.
|
||||
func (s *scanner) toLower(start, end int) {
|
||||
for i := start; i < end; i++ {
|
||||
c := s.b[i]
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
s.b[i] += 'a' - 'A'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scanner) setError(e error) {
|
||||
if s.err == nil || (e == errSyntax && s.err != errSyntax) {
|
||||
s.err = e
|
||||
}
|
||||
}
|
||||
|
||||
// resizeRange shrinks or grows the array at position oldStart such that
// a new string of size newSize can fit between oldStart and oldEnd.
// Sets the scan point to after the resized range.
func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) {
	s.start = oldStart
	if end := oldStart + newSize; end != oldEnd {
		diff := end - oldEnd
		// NOTE(review): the condition allocates a fresh buffer when the new
		// end still fits within cap(s.b) and appends otherwise, which reads
		// inverted; confirm against upstream golang.org/x/text before changing.
		if end < cap(s.b) {
			b := make([]byte, len(s.b)+diff)
			copy(b, s.b[:oldStart])
			copy(b[end:], s.b[oldEnd:])
			s.b = b
		} else {
			s.b = append(s.b[end:], s.b[oldEnd:]...)
		}
		// Shift the scan point by the same amount the range moved.
		s.next = end + (s.next - s.end)
		s.end = end
	}
}
|
||||
|
||||
// replace replaces the current token with repl, resizing the buffer
// to accommodate a different token length if necessary.
func (s *scanner) replace(repl string) {
	s.resizeRange(s.start, s.end, len(repl))
	copy(s.b[s.start:], repl)
}
|
||||
|
||||
// gobble removes the current token from the input and records e via
// setError. The separator adjoining the token is removed as well.
// Caller must call scan after calling gobble.
func (s *scanner) gobble(e error) {
	s.setError(e)
	if s.start == 0 {
		// Token at the very beginning: shift the remainder down.
		s.b = s.b[:+copy(s.b, s.b[s.next:])]
		s.end = 0
	} else {
		// Also drop the '-' preceding the token.
		s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])]
		s.end = s.start - 1
	}
	s.next = s.start
}
|
||||
|
||||
// deleteRange removes the given range from s.b before the current token.
// All scanner positions are shifted left accordingly and errSyntax is
// recorded.
func (s *scanner) deleteRange(start, end int) {
	s.setError(errSyntax)
	s.b = s.b[:start+copy(s.b[start:], s.b[end:])]
	diff := end - start
	s.next -= diff
	s.start -= diff
	s.end -= diff
}
|
||||
|
||||
// scan parses the next token of a BCP 47 string. Tokens that are larger
// than 8 characters or include non-alphanumeric characters result in an error
// and are gobbled and removed from the output.
// It returns the end position of the last token consumed.
func (s *scanner) scan() (end int) {
	end = s.end
	s.token = nil
	for s.start = s.next; s.next < len(s.b); {
		i := bytes.IndexByte(s.b[s.next:], '-')
		if i == -1 {
			// Last token: consume through the end of the buffer.
			s.end = len(s.b)
			s.next = len(s.b)
			i = s.end - s.start
		} else {
			s.end = s.next + i
			s.next = s.end + 1
		}
		token := s.b[s.start:s.end]
		if i < 1 || i > 8 || !isAlphaNum(token) {
			// Invalid token: drop it from the buffer and retry.
			s.gobble(errSyntax)
			continue
		}
		s.token = token
		return end
	}
	if n := len(s.b); n > 0 && s.b[n-1] == '-' {
		// A trailing separator is a syntax error; strip it.
		s.setError(errSyntax)
		s.b = s.b[:len(s.b)-1]
	}
	s.done = true
	return end
}
|
||||
|
||||
// acceptMinSize parses multiple tokens of the given size or greater.
|
||||
// It returns the end position of the last token consumed.
|
||||
func (s *scanner) acceptMinSize(min int) (end int) {
|
||||
end = s.end
|
||||
s.scan()
|
||||
for ; len(s.token) >= min; s.scan() {
|
||||
end = s.end
|
||||
}
|
||||
return end
|
||||
}
|
||||
|
||||
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the default canonicalization type.
// It is shorthand for Default.Parse(s).
func Parse(s string) (t Tag, err error) {
	return Default.Parse(s)
}
|
||||
|
||||
// Parse parses the given BCP 47 string and returns a valid Tag. If parsing
// failed it returns an error and any part of the tag that could be parsed.
// If parsing succeeded but an unknown value was found, it returns
// ValueError. The Tag returned in this case is just stripped of the unknown
// value. All other values are preserved. It accepts tags in the BCP 47 format
// and extensions to this standard defined in
// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
// The resulting tag is canonicalized using the canonicalization type c.
func (c CanonType) Parse(s string) (t Tag, err error) {
	// TODO: consider supporting old-style locale key-value pairs.
	if s == "" {
		return und, errSyntax
	}
	if len(s) <= maxAltTaglen {
		// Fast path for short tags: normalize case and separators into a
		// fixed-size buffer and consult the grandfathered-tags table first.
		b := [maxAltTaglen]byte{}
		for i, c := range s {
			// Generating invalid UTF-8 is okay as it won't match.
			if 'A' <= c && c <= 'Z' {
				c += 'a' - 'A'
			} else if c == '_' {
				c = '-'
			}
			b[i] = byte(c)
		}
		if t, ok := grandfathered(b); ok {
			return t, nil
		}
	}
	scan := makeScannerString(s)
	t, err = parse(&scan, s)
	t, changed := t.canonicalize(c)
	if changed {
		t.remakeString()
	}
	return t, err
}
|
||||
|
||||
// parse parses a full tag from scan. s is the original input; it is
// reused as the tag's string form when it already equals the normalized
// bytes, avoiding an allocation.
func parse(scan *scanner, s string) (t Tag, err error) {
	t = und
	var end int
	if n := len(scan.token); n <= 1 {
		// A leading single-letter token is only valid for a pure
		// private-use tag ("x-...").
		scan.toLower(0, len(scan.b))
		if n == 0 || scan.token[0] != 'x' {
			return t, errSyntax
		}
		end = parseExtensions(scan)
	} else if n >= 4 {
		return und, errSyntax
	} else { // the usual case
		t, end = parseTag(scan)
		if n := len(scan.token); n == 1 {
			t.pExt = uint16(end)
			end = parseExtensions(scan)
		} else if end < len(scan.b) {
			// Trailing garbage after the tag proper.
			scan.setError(errSyntax)
			scan.b = scan.b[:end]
		}
	}
	if int(t.pVariant) < len(scan.b) {
		if end < len(s) {
			s = s[:end]
		}
		// Reuse the input string when it matches the normalized form.
		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
			t.str = s
		} else {
			t.str = string(scan.b)
		}
	} else {
		t.pVariant, t.pExt = 0, 0
	}
	return t, scan.err
}
|
||||
|
||||
// parseTag parses language, script, region and variants.
// It returns a Tag and the end position in the input that was parsed.
func parseTag(scan *scanner) (t Tag, end int) {
	var e error
	// TODO: set an error if an unknown lang, script or region is encountered.
	t.lang, e = getLangID(scan.token)
	scan.setError(e)
	scan.replace(t.lang.String())
	langStart := scan.start
	end = scan.scan()
	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
		// to a tag of the form <extlang>.
		lang, e := getLangID(scan.token)
		if lang != 0 {
			t.lang = lang
			copy(scan.b[langStart:], lang.String())
			scan.b[langStart+3] = '-'
			scan.start = langStart + 4
		}
		scan.gobble(e)
		end = scan.scan()
	}
	// An alphabetic 4-letter token in this position is a script subtag.
	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
		t.script, e = getScriptID(script, scan.token)
		if t.script == 0 {
			scan.gobble(e)
		}
		end = scan.scan()
	}
	// A 2-3 character token here is a region (alpha-2 or numeric-3).
	if n := len(scan.token); n >= 2 && n <= 3 {
		t.region, e = getRegionID(scan.token)
		if t.region == 0 {
			scan.gobble(e)
		} else {
			scan.replace(t.region.String())
		}
		end = scan.scan()
	}
	scan.toLower(scan.start, len(scan.b))
	t.pVariant = byte(end)
	end = parseVariants(scan, end, t)
	t.pExt = uint16(end)
	return t, end
}
|
||||
|
||||
// separator is the canonical subtag separator used when joining tokens.
var separator = []byte{'-'}
|
||||
|
||||
// parseVariants scans tokens as long as each token is a valid variant string.
// Duplicate variants are removed. Variants are reordered into canonical
// order (by variantIndex) when they arrive unsorted.
func parseVariants(scan *scanner, end int, t Tag) int {
	start := scan.start
	varIDBuf := [4]uint8{}
	variantBuf := [4][]byte{}
	varID := varIDBuf[:0]
	variant := variantBuf[:0]
	last := -1
	needSort := false
	for ; len(scan.token) >= 4; scan.scan() {
		// TODO: measure the impact of needing this conversion and redesign
		// the data structure if there is an issue.
		v, ok := variantIndex[string(scan.token)]
		if !ok {
			// unknown variant
			// TODO: allow user-defined variants?
			scan.gobble(mkErrInvalid(scan.token))
			continue
		}
		varID = append(varID, v)
		variant = append(variant, scan.token)
		if !needSort {
			if last < int(v) {
				last = int(v)
			} else {
				needSort = true
				// There are no legal combinations of more than 7 variants
				// (and this is by no means a useful sequence).
				const maxVariants = 8
				if len(varID) > maxVariants {
					break
				}
			}
		}
		end = scan.end
	}
	if needSort {
		sort.Sort(variantsSort{varID, variant})
		k, l := 0, -1
		for i, v := range varID {
			w := int(v)
			if l == w {
				// Remove duplicates.
				continue
			}
			varID[k] = varID[i]
			variant[k] = variant[i]
			k++
			l = w
		}
		// Write the deduplicated, sorted variants back into the buffer.
		if str := bytes.Join(variant[:k], separator); len(str) == 0 {
			end = start - 1
		} else {
			scan.resizeRange(start, end, len(str))
			copy(scan.b[scan.start:], str)
			end = scan.end
		}
	}
	return end
}
|
||||
|
||||
// variantsSort sorts variants by their numeric ID, keeping the IDs (i)
// and the corresponding raw tokens (v) in lockstep.
type variantsSort struct {
	i []uint8
	v [][]byte
}

func (s variantsSort) Len() int { return len(s.i) }

func (s variantsSort) Swap(i, j int) {
	s.i[i], s.i[j] = s.i[j], s.i[i]
	s.v[i], s.v[j] = s.v[j], s.v[i]
}

func (s variantsSort) Less(i, j int) bool { return s.i[i] < s.i[j] }
|
||||
|
||||
// bytesSort sorts a slice of byte slices in lexicographic order.
type bytesSort [][]byte

func (b bytesSort) Len() int { return len(b) }

func (b bytesSort) Swap(i, j int) { b[j], b[i] = b[i], b[j] }

func (b bytesSort) Less(i, j int) bool { return bytes.Compare(b[i], b[j]) < 0 }
|
||||
|
||||
// parseExtensions parses and normalizes the extensions in the buffer.
// It returns the last position of scan.b that is part of any extension.
// It also trims scan.b to remove excess parts accordingly.
// Regular extensions are sorted by singleton; the private-use "x"
// extension, if present, is always placed last.
func parseExtensions(scan *scanner) int {
	start := scan.start
	exts := [][]byte{}
	private := []byte{}
	end := scan.end
	for len(scan.token) == 1 {
		extStart := scan.start
		ext := scan.token[0]
		end = parseExtension(scan)
		extension := scan.b[extStart:end]
		if len(extension) < 3 || (ext != 'x' && len(extension) < 4) {
			// Extension with no content: drop it.
			scan.setError(errSyntax)
			end = extStart
			continue
		} else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) {
			// A single extension, or a leading private-use one: no sorting needed.
			scan.b = scan.b[:end]
			return end
		} else if ext == 'x' {
			// Private use must come last; everything after it belongs to it.
			private = extension
			break
		}
		exts = append(exts, extension)
	}
	sort.Sort(bytesSort(exts))
	if len(private) > 0 {
		exts = append(exts, private)
	}
	scan.b = scan.b[:start]
	if len(exts) > 0 {
		scan.b = append(scan.b, bytes.Join(exts, separator)...)
	} else if start > 0 {
		// Strip trailing '-'.
		scan.b = scan.b[:start-1]
	}
	return end
}
|
||||
|
||||
// parseExtension parses a single extension, normalizing its contents in
// place, and returns the position of the extension end. The current
// token must be the extension's singleton when called.
func parseExtension(scan *scanner) int {
	start, end := scan.start, scan.end
	switch scan.token[0] {
	case 'u':
		// 'u' extension: a run of attributes (>2 bytes) followed by
		// key (2 bytes) / value pairs, each kept in sorted order.
		attrStart := end
		scan.scan()
		for last := []byte{}; len(scan.token) > 2; scan.scan() {
			if bytes.Compare(scan.token, last) != -1 {
				// Attributes are unsorted. Start over from scratch.
				p := attrStart + 1
				scan.next = p
				attrs := [][]byte{}
				for scan.scan(); len(scan.token) > 2; scan.scan() {
					attrs = append(attrs, scan.token)
					end = scan.end
				}
				sort.Sort(bytesSort(attrs))
				copy(scan.b[p:], bytes.Join(attrs, separator))
				break
			}
			last = scan.token
			end = scan.end
		}
		var last, key []byte
		for attrEnd := end; len(scan.token) == 2; last = key {
			key = scan.token
			keyEnd := scan.end
			end = scan.acceptMinSize(3)
			// TODO: check key value validity
			if keyEnd == end || bytes.Compare(key, last) != 1 {
				// We have an invalid key or the keys are not sorted.
				// Start scanning keys from scratch and reorder.
				p := attrEnd + 1
				scan.next = p
				keys := [][]byte{}
				for scan.scan(); len(scan.token) == 2; {
					keyStart, keyEnd := scan.start, scan.end
					end = scan.acceptMinSize(3)
					if keyEnd != end {
						keys = append(keys, scan.b[keyStart:end])
					} else {
						// Key with no value: drop it.
						scan.setError(errSyntax)
						end = keyStart
					}
				}
				sort.Sort(bytesSort(keys))
				reordered := bytes.Join(keys, separator)
				if e := p + len(reordered); e < end {
					// Reordering dropped bytes; trim the leftovers.
					scan.deleteRange(e, end)
					end = e
				}
				copy(scan.b[p:], bytes.Join(keys, separator))
				break
			}
		}
	case 't':
		// 't' extension: an embedded language tag, optionally followed
		// by tkey/tvalue fields (a digit in the second position).
		scan.scan()
		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
			_, end = parseTag(scan)
			scan.toLower(start, end)
		}
		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
			end = scan.acceptMinSize(3)
		}
	case 'x':
		end = scan.acceptMinSize(1)
	default:
		end = scan.acceptMinSize(2)
	}
	return end
}
|
||||
|
||||
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using the Default CanonType. If one or more errors are
// encountered, one of the errors is returned.
// It is shorthand for Default.Compose(part...).
func Compose(part ...interface{}) (t Tag, err error) {
	return Default.Compose(part...)
}
|
||||
|
||||
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
// Script, Region, Variant, []Variant, Extension, []Extension or error. If a
// Base, Script or Region or slice of type Variant or Extension is passed more
// than once, the latter will overwrite the former. Variants and Extensions are
// accumulated, but if two extensions of the same type are passed, the latter
// will replace the former. A Tag overwrites all former values and typically
// only makes sense as the first argument. The resulting tag is returned after
// canonicalizing using CanonType c. If one or more errors are encountered,
// one of the errors is returned.
func (c CanonType) Compose(part ...interface{}) (t Tag, err error) {
	var b builder
	if err = b.update(part...); err != nil {
		return und, err
	}
	t, _ = b.tag.canonicalize(c)

	if len(b.ext) > 0 || len(b.variant) > 0 {
		// Assemble the string form: core tag, then sorted variants, then
		// sorted extensions (private use last).
		sort.Sort(sortVariant(b.variant))
		sort.Strings(b.ext)
		if b.private != "" {
			b.ext = append(b.ext, b.private)
		}
		n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...)
		buf := make([]byte, n)
		p := t.genCoreBytes(buf)
		t.pVariant = byte(p)
		p += appendTokens(buf[p:], b.variant...)
		t.pExt = uint16(p)
		p += appendTokens(buf[p:], b.ext...)
		t.str = string(buf[:p])
	} else if b.private != "" {
		t.str = b.private
		t.remakeString()
	}
	return
}
|
||||
|
||||
// builder accumulates the constituent parts of a tag while Compose
// processes its arguments.
type builder struct {
	tag Tag // holds lang, script and region

	private string // the x extension
	ext     []string
	variant []string

	err error
}
|
||||
|
||||
func (b *builder) addExt(e string) {
|
||||
if e == "" {
|
||||
} else if e[0] == 'x' {
|
||||
b.private = e
|
||||
} else {
|
||||
b.ext = append(b.ext, e)
|
||||
}
|
||||
}
|
||||
|
||||
// errInvalidArgument is recorded when an empty Extension or Variant is passed to Compose.
var errInvalidArgument = errors.New("invalid Extension or Variant")
|
||||
|
||||
// update merges the given parts into the builder. A Tag resets lang,
// script, region, variants and extensions; later parts of the same kind
// overwrite or replace earlier ones. If a part is an error, it becomes
// the returned error (the last error argument wins).
func (b *builder) update(part ...interface{}) (err error) {
	// replace substitutes s for an existing element of *l for which eq
	// holds and reports whether it did. An empty s is flagged as an
	// invalid argument on the builder.
	replace := func(l *[]string, s string, eq func(a, b string) bool) bool {
		if s == "" {
			b.err = errInvalidArgument
			return true
		}
		for i, v := range *l {
			if eq(v, s) {
				(*l)[i] = s
				return true
			}
		}
		return false
	}
	for _, x := range part {
		switch v := x.(type) {
		case Tag:
			b.tag.lang = v.lang
			b.tag.region = v.region
			b.tag.script = v.script
			if v.str != "" {
				// Re-extract variants and extensions from the tag's string form.
				b.variant = nil
				for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; {
					x, s = nextToken(s)
					b.variant = append(b.variant, x)
				}
				b.ext, b.private = nil, ""
				for i, e := int(v.pExt), ""; i < len(v.str); {
					i, e = getExtension(v.str, i)
					b.addExt(e)
				}
			}
		case Base:
			b.tag.lang = v.langID
		case Script:
			b.tag.script = v.scriptID
		case Region:
			b.tag.region = v.regionID
		case Variant:
			if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) {
				b.variant = append(b.variant, v.variant)
			}
		case Extension:
			// Extensions match on their singleton (first byte).
			if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) {
				b.addExt(v.s)
			}
		case []Variant:
			b.variant = nil
			for _, x := range v {
				b.update(x)
			}
		case []Extension:
			b.ext, b.private = nil, ""
			for _, e := range v {
				b.update(e)
			}
		// TODO: support parsing of raw strings based on morphology or just extensions?
		case error:
			err = v
		}
	}
	return
}
|
||||
|
||||
// tokenLen returns the number of bytes needed to encode the given
// tokens, counting one separator byte per token.
func tokenLen(token ...string) (n int) {
	for _, s := range token {
		n += 1 + len(s)
	}
	return n
}
|
||||
|
||||
// appendTokens writes each token into b, each preceded by a '-', and
// returns the number of bytes written. The caller must ensure b is
// large enough (see tokenLen).
func appendTokens(b []byte, token ...string) int {
	n := 0
	for _, s := range token {
		b[n] = '-'
		n++
		n += copy(b[n:], s)
	}
	return n
}
|
||||
|
||||
type sortVariant []string
|
||||
|
||||
func (s sortVariant) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s sortVariant) Swap(i, j int) {
|
||||
s[j], s[i] = s[i], s[j]
|
||||
}
|
||||
|
||||
func (s sortVariant) Less(i, j int) bool {
|
||||
return variantIndex[s[i]] < variantIndex[s[j]]
|
||||
}
|
||||
|
||||
// findExt returns the index of the extension in list whose singleton
// (first byte) equals x, or -1 if no such extension is present.
func findExt(list []string, x byte) int {
	for i := range list {
		if list[i][0] == x {
			return i
		}
	}
	return -1
}
|
||||
|
||||
// getExtension returns the end position of the extension starting at or
// just after position p, together with the extension itself (singleton
// included). A private-use ("x") extension always runs to the end of s.
func getExtension(s string, p int) (end int, ext string) {
	if s[p] == '-' {
		p++
	}
	if s[p] == 'x' {
		return len(s), s[p:]
	}
	end = nextExtension(s, p)
	return end, s[p:end]
}
|
||||
|
||||
// nextExtension finds the start of the next extension within s at or
// after position p by searching for the -<singleton>- pattern. In the
// vast majority of cases, language tags have at most one extension and
// extensions tend to be small, so a linear scan is cheap. If no
// further extension exists, len(s) is returned.
func nextExtension(s string, p int) int {
	limit := len(s) - 3
	for p < limit {
		if s[p] != '-' {
			p++
			continue
		}
		if s[p+2] == '-' {
			return p
		}
		p += 3
	}
	return len(s)
}
|
||||
|
||||
// errInvalidWeight is returned by ParseAcceptLanguage for a malformed q-value.
var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight")
|
||||
|
||||
// ParseAcceptLanguage parses the contents of an Accept-Language header as
// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and
// a list of corresponding quality weights. It is more permissive than RFC 2616
// and may return non-nil slices even if the input is not valid.
// The Tags will be sorted by highest weight first and then by first occurrence.
// Tags with a weight of zero will be dropped. An error will be returned if the
// input could not be parsed.
func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) {
	var entry string
	// Entries are comma-separated "<tag>[;q=<weight>]" items.
	for s != "" {
		if entry, s = split(s, ','); entry == "" {
			continue
		}

		entry, weight := split(entry, ';')

		// Scan the language.
		t, err := Parse(entry)
		if err != nil {
			// Fall back to a small table of common non-BCP47 names.
			id, ok := acceptFallback[entry]
			if !ok {
				return nil, nil, err
			}
			t = Tag{lang: id}
		}

		// Scan the optional weight.
		w := 1.0
		if weight != "" {
			weight = consume(weight, 'q')
			weight = consume(weight, '=')
			// consume returns the empty string when a token could not be
			// consumed, resulting in an error for ParseFloat.
			if w, err = strconv.ParseFloat(weight, 32); err != nil {
				return nil, nil, errInvalidWeight
			}
			// Drop tags with a quality weight of 0.
			if w <= 0 {
				continue
			}
		}

		tag = append(tag, t)
		q = append(q, float32(w))
	}
	// Stable sort keeps first-occurrence order among equal weights.
	sortStable(&tagSort{tag, q})
	return tag, q, nil
}
|
||||
|
||||
// consume strips a single leading token byte c from s and returns the
// remainder with surrounding whitespace trimmed. It returns the empty
// string if s is empty or does not start with c.
func consume(s string, c byte) string {
	if len(s) > 0 && s[0] == c {
		return strings.TrimSpace(s[1:])
	}
	return ""
}
|
||||
|
||||
// split cuts s at the first occurrence of the byte c, trimming spaces
// around both halves. If c is absent, the whole trimmed string is
// returned as head and tail is empty.
func split(s string, c byte) (head, tail string) {
	i := strings.IndexByte(s, c)
	if i < 0 {
		return strings.TrimSpace(s), ""
	}
	return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:])
}
|
||||
|
||||
// acceptFallback is a hack mapping to deal with a small number of cases that
// occur in Accept-Language headers (with reasonable frequency) but are not
// valid BCP 47 tags.
var acceptFallback = map[string]langID{
	"english": _en,
	"deutsch": _de,
	"italian": _it,
	"french":  _fr,
	"*":       _mul, // defined in the spec to match all languages.
}
|
||||
|
||||
type tagSort struct {
|
||||
tag []Tag
|
||||
q []float32
|
||||
}
|
||||
|
||||
func (s *tagSort) Len() int {
|
||||
return len(s.q)
|
||||
}
|
||||
|
||||
func (s *tagSort) Less(i, j int) bool {
|
||||
return s.q[i] > s.q[j]
|
||||
}
|
||||
|
||||
func (s *tagSort) Swap(i, j int) {
|
||||
s.tag[i], s.tag[j] = s.tag[j], s.tag[i]
|
||||
s.q[i], s.q[j] = s.q[j], s.q[i]
|
||||
}
|
517
vendor/golang.org/x/text/language/parse_test.go
generated
vendored
Normal file
517
vendor/golang.org/x/text/language/parse_test.go
generated
vendored
Normal file
@ -0,0 +1,517 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/text/internal/tag"
|
||||
)
|
||||
|
||||
// scanTest describes a single scanner input and the tokens it should yield.
type scanTest struct {
	ok  bool     // true if scanning does not result in an error
	in  string   // the raw input handed to the scanner
	tok []string // the expected tokens
}
|
||||
|
||||
// tests exercises the scanner with well- and ill-formed inputs; tok
// lists the tokens expected after invalid ones are dropped.
var tests = []scanTest{
	{true, "", []string{}},
	{true, "1", []string{"1"}},
	{true, "en", []string{"en"}},
	{true, "root", []string{"root"}},
	{true, "maxchars", []string{"maxchars"}},
	{false, "bad/", []string{}},
	{false, "morethan8", []string{}},
	{false, "-", []string{}},
	{false, "----", []string{}},
	{false, "_", []string{}},
	{true, "en-US", []string{"en", "US"}},
	{true, "en_US", []string{"en", "US"}},
	{false, "en-US-", []string{"en", "US"}},
	{false, "en-US--", []string{"en", "US"}},
	{false, "en-US---", []string{"en", "US"}},
	{false, "en--US", []string{"en", "US"}},
	{false, "-en-US", []string{"en", "US"}},
	{false, "-en--US-", []string{"en", "US"}},
	{false, "-en--US-", []string{"en", "US"}},
	{false, "en-.-US", []string{"en", "US"}},
	{false, ".-en--US-.", []string{"en", "US"}},
	{false, "en-u.-US", []string{"en", "US"}},
	{true, "en-u1-US", []string{"en", "u1", "US"}},
	{true, "maxchar1_maxchar2-maxchar3", []string{"maxchar1", "maxchar2", "maxchar3"}},
	{false, "moreThan8-moreThan8-e", []string{"e"}},
}
|
||||
|
||||
// TestScan verifies the token stream, the normalized buffer, and the
// error state produced by the scanner for each input in tests.
func TestScan(t *testing.T) {
	for i, tt := range tests {
		scan := makeScannerString(tt.in)
		for j := 0; !scan.done; j++ {
			if j >= len(tt.tok) {
				t.Errorf("%d: extra token %q", i, scan.token)
			} else if tag.Compare(tt.tok[j], scan.token) != 0 {
				t.Errorf("%d: token %d: found %q; want %q", i, j, scan.token, tt.tok[j])
				break
			}
			scan.scan()
		}
		// The remaining buffer must equal the expected tokens re-joined.
		if s := strings.Join(tt.tok, "-"); tag.Compare(s, bytes.Replace(scan.b, b("_"), b("-"), -1)) != 0 {
			t.Errorf("%d: input: found %q; want %q", i, scan.b, s)
		}
		if (scan.err == nil) != tt.ok {
			t.Errorf("%d: ok: found %v; want %v", i, scan.err == nil, tt.ok)
		}
	}
}
|
||||
|
||||
// TestAcceptMinSize checks that acceptMinSize consumes exactly the
// leading run of tokens that are at least the given size.
func TestAcceptMinSize(t *testing.T) {
	for i, tt := range tests {
		// count number of successive tokens with a minimum size.
		for sz := 1; sz <= 8; sz++ {
			scan := makeScannerString(tt.in)
			scan.end, scan.next = 0, 0
			end := scan.acceptMinSize(sz)
			n := 0
			for i := 0; i < len(tt.tok) && len(tt.tok[i]) >= sz; i++ {
				n += len(tt.tok[i])
				if i > 0 {
					n++ // account for the separator between tokens
				}
			}
			if end != n {
				t.Errorf("%d:%d: found len %d; want %d", i, sz, end, n)
			}
		}
	}
}
|
||||
|
||||
// parseTest describes the expected decomposition of a single input tag.
type parseTest struct {
	i                    int // the index of this test
	in                   string
	lang, script, region string
	variants, ext        string
	extList              []string // only used when more than one extension is present
	invalid              bool
	rewrite              bool // special rewrite not handled by parseTag
	changed              bool // string needed to be reformatted
}
|
||||
|
||||
func parseTests() []parseTest {
|
||||
tests := []parseTest{
|
||||
{in: "root", lang: "und"},
|
||||
{in: "und", lang: "und"},
|
||||
{in: "en", lang: "en"},
|
||||
{in: "xy", lang: "und", invalid: true},
|
||||
{in: "en-ZY", lang: "en", invalid: true},
|
||||
{in: "gsw", lang: "gsw"},
|
||||
{in: "sr_Latn", lang: "sr", script: "Latn"},
|
||||
{in: "af-Arab", lang: "af", script: "Arab"},
|
||||
{in: "nl-BE", lang: "nl", region: "BE"},
|
||||
{in: "es-419", lang: "es", region: "419"},
|
||||
{in: "und-001", lang: "und", region: "001"},
|
||||
{in: "de-latn-be", lang: "de", script: "Latn", region: "BE"},
|
||||
// Variants
|
||||
{in: "de-1901", lang: "de", variants: "1901"},
|
||||
// Accept with unsuppressed script.
|
||||
{in: "de-Latn-1901", lang: "de", script: "Latn", variants: "1901"},
|
||||
// Specialized.
|
||||
{in: "sl-rozaj", lang: "sl", variants: "rozaj"},
|
||||
{in: "sl-rozaj-lipaw", lang: "sl", variants: "rozaj-lipaw"},
|
||||
{in: "sl-rozaj-biske", lang: "sl", variants: "rozaj-biske"},
|
||||
{in: "sl-rozaj-biske-1994", lang: "sl", variants: "rozaj-biske-1994"},
|
||||
{in: "sl-rozaj-1994", lang: "sl", variants: "rozaj-1994"},
|
||||
// Maximum number of variants while adhering to prefix rules.
|
||||
{in: "sl-rozaj-biske-1994-alalc97-fonipa-fonupa-fonxsamp", lang: "sl", variants: "rozaj-biske-1994-alalc97-fonipa-fonupa-fonxsamp"},
|
||||
|
||||
// Sorting.
|
||||
{in: "sl-1994-biske-rozaj", lang: "sl", variants: "rozaj-biske-1994", changed: true},
|
||||
{in: "sl-rozaj-biske-1994-alalc97-fonupa-fonipa-fonxsamp", lang: "sl", variants: "rozaj-biske-1994-alalc97-fonipa-fonupa-fonxsamp", changed: true},
|
||||
{in: "nl-fonxsamp-alalc97-fonipa-fonupa", lang: "nl", variants: "alalc97-fonipa-fonupa-fonxsamp", changed: true},
|
||||
|
||||
// Duplicates variants are removed, but not an error.
|
||||
{in: "nl-fonupa-fonupa", lang: "nl", variants: "fonupa"},
|
||||
|
||||
// Variants that do not have correct prefixes. We still accept these.
|
||||
{in: "de-Cyrl-1901", lang: "de", script: "Cyrl", variants: "1901"},
|
||||
{in: "sl-rozaj-lipaw-1994", lang: "sl", variants: "rozaj-lipaw-1994"},
|
||||
{in: "sl-1994-biske-rozaj-1994-biske-rozaj", lang: "sl", variants: "rozaj-biske-1994", changed: true},
|
||||
{in: "de-Cyrl-1901", lang: "de", script: "Cyrl", variants: "1901"},
|
||||
|
||||
// Invalid variant.
|
||||
{in: "de-1902", lang: "de", variants: "", invalid: true},
|
||||
|
||||
{in: "EN_CYRL", lang: "en", script: "Cyrl"},
|
||||
// private use and extensions
|
||||
{in: "x-a-b-c-d", ext: "x-a-b-c-d"},
|
||||
{in: "x_A.-B-C_D", ext: "x-b-c-d", invalid: true, changed: true},
|
||||
{in: "x-aa-bbbb-cccccccc-d", ext: "x-aa-bbbb-cccccccc-d"},
|
||||
{in: "en-c_cc-b-bbb-a-aaa", lang: "en", changed: true, extList: []string{"a-aaa", "b-bbb", "c-cc"}},
|
||||
{in: "en-x_cc-b-bbb-a-aaa", lang: "en", ext: "x-cc-b-bbb-a-aaa", changed: true},
|
||||
{in: "en-c_cc-b-bbb-a-aaa-x-x", lang: "en", changed: true, extList: []string{"a-aaa", "b-bbb", "c-cc", "x-x"}},
|
||||
{in: "en-v-c", lang: "en", ext: "", invalid: true},
|
||||
{in: "en-v-abcdefghi", lang: "en", ext: "", invalid: true},
|
||||
{in: "en-v-abc-x", lang: "en", ext: "v-abc", invalid: true},
|
||||
{in: "en-v-abc-x-", lang: "en", ext: "v-abc", invalid: true},
|
||||
{in: "en-v-abc-w-x-xx", lang: "en", extList: []string{"v-abc", "x-xx"}, invalid: true, changed: true},
|
||||
{in: "en-v-abc-w-y-yx", lang: "en", extList: []string{"v-abc", "y-yx"}, invalid: true, changed: true},
|
||||
{in: "en-v-c-abc", lang: "en", ext: "c-abc", invalid: true, changed: true},
|
||||
{in: "en-v-w-abc", lang: "en", ext: "w-abc", invalid: true, changed: true},
|
||||
{in: "en-v-x-abc", lang: "en", ext: "x-abc", invalid: true, changed: true},
|
||||
{in: "en-v-x-a", lang: "en", ext: "x-a", invalid: true, changed: true},
|
||||
{in: "en-9-aa-0-aa-z-bb-x-a", lang: "en", extList: []string{"0-aa", "9-aa", "z-bb", "x-a"}, changed: true},
|
||||
{in: "en-u-c", lang: "en", ext: "", invalid: true},
|
||||
{in: "en-u-co-phonebk", lang: "en", ext: "u-co-phonebk"},
|
||||
{in: "en-u-co-phonebk-ca", lang: "en", ext: "u-co-phonebk", invalid: true},
|
||||
{in: "en-u-nu-arabic-co-phonebk-ca", lang: "en", ext: "u-co-phonebk-nu-arabic", invalid: true, changed: true},
|
||||
{in: "en-u-nu-arabic-co-phonebk-ca-x", lang: "en", ext: "u-co-phonebk-nu-arabic", invalid: true, changed: true},
|
||||
{in: "en-u-nu-arabic-co-phonebk-ca-s", lang: "en", ext: "u-co-phonebk-nu-arabic", invalid: true, changed: true},
|
||||
{in: "en-u-nu-arabic-co-phonebk-ca-a12345678", lang: "en", ext: "u-co-phonebk-nu-arabic", invalid: true, changed: true},
|
||||
{in: "en-u-co-phonebook", lang: "en", ext: "", invalid: true},
|
||||
{in: "en-u-co-phonebook-cu-xau", lang: "en", ext: "u-cu-xau", invalid: true, changed: true},
|
||||
{in: "en-Cyrl-u-co-phonebk", lang: "en", script: "Cyrl", ext: "u-co-phonebk"},
|
||||
{in: "en-US-u-co-phonebk", lang: "en", region: "US", ext: "u-co-phonebk"},
|
||||
{in: "en-US-u-co-phonebk-cu-xau", lang: "en", region: "US", ext: "u-co-phonebk-cu-xau"},
|
||||
{in: "en-scotland-u-co-phonebk", lang: "en", variants: "scotland", ext: "u-co-phonebk"},
|
||||
{in: "en-u-cu-xua-co-phonebk", lang: "en", ext: "u-co-phonebk-cu-xua", changed: true},
|
||||
{in: "en-u-def-abc-cu-xua-co-phonebk", lang: "en", ext: "u-abc-def-co-phonebk-cu-xua", changed: true},
|
||||
{in: "en-u-def-abc", lang: "en", ext: "u-abc-def", changed: true},
|
||||
{in: "en-u-cu-xua-co-phonebk-a-cd", lang: "en", extList: []string{"a-cd", "u-co-phonebk-cu-xua"}, changed: true},
|
||||
// Invalid "u" extension. Drop invalid parts.
|
||||
{in: "en-u-cu-co-phonebk", lang: "en", extList: []string{"u-co-phonebk"}, invalid: true, changed: true},
|
||||
{in: "en-u-cu-xau-co", lang: "en", extList: []string{"u-cu-xau"}, invalid: true},
|
||||
// We allow duplicate keys as the LDML spec does not explicitly prohibit it.
|
||||
// TODO: Consider eliminating duplicates and returning an error.
|
||||
{in: "en-u-cu-xau-co-phonebk-cu-xau", lang: "en", ext: "u-co-phonebk-cu-xau-cu-xau", changed: true},
|
||||
{in: "en-t-en-Cyrl-NL-fonipa", lang: "en", ext: "t-en-cyrl-nl-fonipa", changed: true},
|
||||
{in: "en-t-en-Cyrl-NL-fonipa-t0-abc-def", lang: "en", ext: "t-en-cyrl-nl-fonipa-t0-abc-def", changed: true},
|
||||
{in: "en-t-t0-abcd", lang: "en", ext: "t-t0-abcd"},
|
||||
// Not necessary to have changed here.
|
||||
{in: "en-t-nl-abcd", lang: "en", ext: "t-nl", invalid: true},
|
||||
{in: "en-t-nl-latn", lang: "en", ext: "t-nl-latn"},
|
||||
{in: "en-t-t0-abcd-x-a", lang: "en", extList: []string{"t-t0-abcd", "x-a"}},
|
||||
// invalid
|
||||
{in: "", lang: "und", invalid: true},
|
||||
{in: "-", lang: "und", invalid: true},
|
||||
{in: "x", lang: "und", invalid: true},
|
||||
{in: "x-", lang: "und", invalid: true},
|
||||
{in: "x--", lang: "und", invalid: true},
|
||||
{in: "a-a-b-c-d", lang: "und", invalid: true},
|
||||
{in: "en-", lang: "en", invalid: true},
|
||||
{in: "enne-", lang: "und", invalid: true},
|
||||
{in: "en.", lang: "und", invalid: true},
|
||||
{in: "en.-latn", lang: "und", invalid: true},
|
||||
{in: "en.-en", lang: "en", invalid: true},
|
||||
{in: "x-a-tooManyChars-c-d", ext: "x-a-c-d", invalid: true, changed: true},
|
||||
{in: "a-tooManyChars-c-d", lang: "und", invalid: true},
|
||||
// TODO: check key-value validity
|
||||
// { in: "en-u-cu-xd", lang: "en", ext: "u-cu-xd", invalid: true },
|
||||
{in: "en-t-abcd", lang: "en", invalid: true},
|
||||
{in: "en-Latn-US-en", lang: "en", script: "Latn", region: "US", invalid: true},
|
||||
// rewrites (more tests in TestGrandfathered)
|
||||
{in: "zh-min-nan", lang: "nan"},
|
||||
{in: "zh-yue", lang: "yue"},
|
||||
{in: "zh-xiang", lang: "hsn", rewrite: true},
|
||||
{in: "zh-guoyu", lang: "cmn", rewrite: true},
|
||||
{in: "iw", lang: "iw"},
|
||||
{in: "sgn-BE-FR", lang: "sfb", rewrite: true},
|
||||
{in: "i-klingon", lang: "tlh", rewrite: true},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
tests[i].i = i
|
||||
if tt.extList != nil {
|
||||
tests[i].ext = strings.Join(tt.extList, "-")
|
||||
}
|
||||
if tt.ext != "" && tt.extList == nil {
|
||||
tests[i].extList = []string{tt.ext}
|
||||
}
|
||||
}
|
||||
return tests
|
||||
}
|
||||
|
||||
// TestParseExtensions verifies that parseExtensions normalizes the extension
// part of a tag — lowercasing, reordering, and dropping ill-formed subtags —
// as expected by the parseTests table.
func TestParseExtensions(t *testing.T) {
	for i, tt := range parseTests() {
		// Only cases with an expected extension string and no
		// grandfathered rewrite exercise extension parsing.
		if tt.ext == "" || tt.rewrite {
			continue
		}
		scan := makeScannerString(tt.in)
		// If the input does not already start at a single-letter
		// extension subtag, advance the scanner to the first extension.
		if len(scan.b) > 1 && scan.b[1] != '-' {
			scan.end = nextExtension(string(scan.b), 0)
			scan.next = scan.end + 1
			scan.scan()
		}
		start := scan.start
		scan.toLower(start, len(scan.b))
		parseExtensions(&scan)
		ext := string(scan.b[start:])
		if ext != tt.ext {
			t.Errorf("%d(%s): ext was %v; want %v", i, tt.in, ext, tt.ext)
		}
		// The input counts as "changed" when the normalized extensions
		// no longer form a prefix of the corresponding original input.
		if changed := !strings.HasPrefix(tt.in[start:], ext); changed != tt.changed {
			t.Errorf("%d(%s): changed was %v; want %v", i, tt.in, changed, tt.changed)
		}
	}
}
|
||||
|
||||
// partChecks runs checks for each part by calling the function returned by f.
// f converts a test case into a Tag (or signals that the case should be
// skipped); the Tag's language, script, region, variant, and extension parts
// are then compared against the expectations recorded in the test case.
func partChecks(t *testing.T, f func(*parseTest) (Tag, bool)) {
	for i, tt := range parseTests() {
		tag, skip := f(&tt)
		if skip {
			continue
		}
		if l, _ := getLangID(b(tt.lang)); l != tag.lang {
			t.Errorf("%d: lang was %q; want %q", i, tag.lang, l)
		}
		if sc, _ := getScriptID(script, b(tt.script)); sc != tag.script {
			t.Errorf("%d: script was %q; want %q", i, tag.script, sc)
		}
		if r, _ := getRegionID(b(tt.region)); r != tag.region {
			t.Errorf("%d: region was %q; want %q", i, tag.region, r)
		}
		// The variant/extension offsets are only meaningful when the
		// tag retains its string representation.
		if tag.str == "" {
			continue
		}
		// NOTE(review): pVariant/pExt appear to index just before their
		// respective parts, so the increment skips a leading '-'
		// separator when variants are present — confirm against the Tag
		// type's field documentation.
		p := int(tag.pVariant)
		if p < int(tag.pExt) {
			p++
		}
		if s, g := tag.str[p:tag.pExt], tt.variants; s != g {
			t.Errorf("%d: variants was %q; want %q", i, s, g)
		}
		p = int(tag.pExt)
		if p > 0 && p < len(tag.str) {
			p++
		}
		if s, g := (tag.str)[p:], tt.ext; s != g {
			t.Errorf("%d: extensions were %q; want %q", i, s, g)
		}
	}
}
|
||||
|
||||
func TestParseTag(t *testing.T) {
|
||||
partChecks(t, func(tt *parseTest) (id Tag, skip bool) {
|
||||
if strings.HasPrefix(tt.in, "x-") || tt.rewrite {
|
||||
return Tag{}, true
|
||||
}
|
||||
scan := makeScannerString(tt.in)
|
||||
id, end := parseTag(&scan)
|
||||
id.str = string(scan.b[:end])
|
||||
tt.ext = ""
|
||||
tt.extList = []string{}
|
||||
return id, false
|
||||
})
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
partChecks(t, func(tt *parseTest) (id Tag, skip bool) {
|
||||
id, err := Raw.Parse(tt.in)
|
||||
ext := ""
|
||||
if id.str != "" {
|
||||
if strings.HasPrefix(id.str, "x-") {
|
||||
ext = id.str
|
||||
} else if int(id.pExt) < len(id.str) && id.pExt > 0 {
|
||||
ext = id.str[id.pExt+1:]
|
||||
}
|
||||
}
|
||||
if tag, _ := Raw.Parse(id.String()); tag.String() != id.String() {
|
||||
t.Errorf("%d:%s: reparse was %q; want %q", tt.i, tt.in, id.String(), tag.String())
|
||||
}
|
||||
if ext != tt.ext {
|
||||
t.Errorf("%d:%s: ext was %q; want %q", tt.i, tt.in, ext, tt.ext)
|
||||
}
|
||||
changed := id.str != "" && !strings.HasPrefix(tt.in, id.str)
|
||||
if changed != tt.changed {
|
||||
t.Errorf("%d:%s: changed was %v; want %v", tt.i, tt.in, changed, tt.changed)
|
||||
}
|
||||
if (err != nil) != tt.invalid {
|
||||
t.Errorf("%d:%s: invalid was %v; want %v. Error: %v", tt.i, tt.in, err != nil, tt.invalid, err)
|
||||
}
|
||||
return id, false
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrors(t *testing.T) {
|
||||
mkInvalid := func(s string) error {
|
||||
return mkErrInvalid([]byte(s))
|
||||
}
|
||||
tests := []struct {
|
||||
in string
|
||||
out error
|
||||
}{
|
||||
// invalid subtags.
|
||||
{"ac", mkInvalid("ac")},
|
||||
{"AC", mkInvalid("ac")},
|
||||
{"aa-Uuuu", mkInvalid("Uuuu")},
|
||||
{"aa-AB", mkInvalid("AB")},
|
||||
// ill-formed wins over invalid.
|
||||
{"ac-u", errSyntax},
|
||||
{"ac-u-ca", errSyntax},
|
||||
{"ac-u-ca-co-pinyin", errSyntax},
|
||||
{"noob", errSyntax},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
_, err := Parse(tt.in)
|
||||
if err != tt.out {
|
||||
t.Errorf("%s: was %q; want %q", tt.in, err, tt.out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompose1(t *testing.T) {
|
||||
partChecks(t, func(tt *parseTest) (id Tag, skip bool) {
|
||||
l, _ := ParseBase(tt.lang)
|
||||
s, _ := ParseScript(tt.script)
|
||||
r, _ := ParseRegion(tt.region)
|
||||
v := []Variant{}
|
||||
for _, x := range strings.Split(tt.variants, "-") {
|
||||
p, _ := ParseVariant(x)
|
||||
v = append(v, p)
|
||||
}
|
||||
e := []Extension{}
|
||||
for _, x := range tt.extList {
|
||||
p, _ := ParseExtension(x)
|
||||
e = append(e, p)
|
||||
}
|
||||
id, _ = Raw.Compose(l, s, r, v, e)
|
||||
return id, false
|
||||
})
|
||||
}
|
||||
|
||||
func TestCompose2(t *testing.T) {
|
||||
partChecks(t, func(tt *parseTest) (id Tag, skip bool) {
|
||||
l, _ := ParseBase(tt.lang)
|
||||
s, _ := ParseScript(tt.script)
|
||||
r, _ := ParseRegion(tt.region)
|
||||
p := []interface{}{l, s, r, s, r, l}
|
||||
for _, x := range strings.Split(tt.variants, "-") {
|
||||
v, _ := ParseVariant(x)
|
||||
p = append(p, v)
|
||||
}
|
||||
for _, x := range tt.extList {
|
||||
e, _ := ParseExtension(x)
|
||||
p = append(p, e)
|
||||
}
|
||||
id, _ = Raw.Compose(p...)
|
||||
return id, false
|
||||
})
|
||||
}
|
||||
|
||||
func TestCompose3(t *testing.T) {
|
||||
partChecks(t, func(tt *parseTest) (id Tag, skip bool) {
|
||||
id, _ = Raw.Parse(tt.in)
|
||||
id, _ = Raw.Compose(id)
|
||||
return id, false
|
||||
})
|
||||
}
|
||||
|
||||
// mk is a test shorthand that converts s to a Tag using the Raw canonicalizer
// (i.e. without applying any canonicalization rules).
func mk(s string) Tag {
	return Raw.Make(s)
}
|
||||
|
||||
// TestParseAcceptLanguage exercises ParseAcceptLanguage on HTTP
// Accept-Language header values: whitespace and empty-element tolerance,
// q-value parsing, sorting by weight, dropping of q=0 entries, and error
// reporting for malformed input.
func TestParseAcceptLanguage(t *testing.T) {
	// res pairs an expected tag with its expected quality weight.
	type res struct {
		t Tag
		q float32
	}
	en := []res{{mk("en"), 1.0}}
	tests := []struct {
		out []res  // expected (tag, weight) pairs, in output order
		in  string // Accept-Language header value
		ok  bool   // whether parsing should succeed
	}{
		{en, "en", true},
		{en, " en", true},
		{en, "en ", true},
		{en, " en ", true},
		{en, "en,", true},
		{en, ",en", true},
		{en, ",,,en,,,", true},
		{en, ",en;q=1", true},

		// We allow an empty input, contrary to spec.
		{nil, "", true},
		{[]res{{mk("aa"), 1}}, "aa;", true}, // allow unspecified weight

		// errors
		{nil, ";", false},
		{nil, "$", false},
		{nil, "e;", false},
		{nil, "x;", false},
		{nil, "x", false},
		{nil, "ac", false}, // non-existing language
		{nil, "aa;q", false},
		{nil, "aa;q=", false},
		{nil, "aa;q=.", false},

		// odd fallbacks
		{
			[]res{{mk("en"), 0.1}},
			" english ;q=.1",
			true,
		},
		{
			[]res{{mk("it"), 1.0}, {mk("de"), 1.0}, {mk("fr"), 1.0}},
			" italian, deutsch, french",
			true,
		},

		// lists
		{
			[]res{{mk("en"), 0.1}},
			"en;q=.1",
			true,
		},
		{
			[]res{{mk("mul"), 1.0}},
			"*",
			true,
		},
		{
			[]res{{mk("en"), 1.0}, {mk("de"), 1.0}},
			"en,de",
			true,
		},
		{
			[]res{{mk("en"), 1.0}, {mk("de"), .5}},
			"en,de;q=0.5",
			true,
		},
		{
			[]res{{mk("de"), 0.8}, {mk("en"), 0.5}},
			" en ; q = 0.5 , , de;q=0.8",
			true,
		},
		{
			[]res{{mk("en"), 1.0}, {mk("de"), 1.0}, {mk("fr"), 1.0}, {mk("tlh"), 1.0}},
			"en,de,fr,i-klingon",
			true,
		},
		// sorting
		{
			[]res{{mk("tlh"), 0.4}, {mk("de"), 0.2}, {mk("fr"), 0.2}, {mk("en"), 0.1}},
			"en;q=0.1,de;q=0.2,fr;q=0.2,i-klingon;q=0.4",
			true,
		},
		// dropping
		{
			[]res{{mk("fr"), 0.2}, {mk("en"), 0.1}},
			"en;q=0.1,de;q=0,fr;q=0.2,i-klingon;q=0.0",
			true,
		},
	}
	for i, tt := range tests {
		tags, qs, e := ParseAcceptLanguage(tt.in)
		if e == nil != tt.ok {
			t.Errorf("%d:%s:err: was %v; want %v", i, tt.in, e == nil, tt.ok)
		}
		// Compare tags and weights pairwise; stop at the first mismatch
		// to avoid cascading failures for the same input.
		for j, tag := range tags {
			if out := tt.out[j]; !tag.equalTags(out.t) || qs[j] != out.q {
				t.Errorf("%d:%s: was %s, %1f; want %s, %1f", i, tt.in, tag, qs[j], out.t, out.q)
				break
			}
		}
	}
}
|
3686
vendor/golang.org/x/text/language/tables.go
generated
vendored
Normal file
3686
vendor/golang.org/x/text/language/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
Normal file
143
vendor/golang.org/x/text/language/tags.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package language
|
||||
|
||||
// TODO: Various sets of commonly used tags and regions.
|
||||
|
||||
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
|
||||
// It simplifies safe initialization of Tag values.
|
||||
func MustParse(s string) Tag {
|
||||
t, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
|
||||
// It simplifies safe initialization of Tag values.
|
||||
func (c CanonType) MustParse(s string) Tag {
|
||||
t, err := c.Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
|
||||
// It simplifies safe initialization of Base values.
|
||||
func MustParseBase(s string) Base {
|
||||
b, err := ParseBase(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MustParseScript is like ParseScript, but panics if the given script cannot be
|
||||
// parsed. It simplifies safe initialization of Script values.
|
||||
func MustParseScript(s string) Script {
|
||||
scr, err := ParseScript(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return scr
|
||||
}
|
||||
|
||||
// MustParseRegion is like ParseRegion, but panics if the given region cannot be
|
||||
// parsed. It simplifies safe initialization of Region values.
|
||||
func MustParseRegion(s string) Region {
|
||||
r, err := ParseRegion(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// und is the unexported zero Tag; Und is its exported counterpart,
// representing the undefined language.
//
// The remaining variables are predefined Tag values for commonly used
// languages; the BCP 47 tag each one represents is given in the trailing
// comment.
var (
	und = Tag{}

	Und Tag = Tag{}

	Afrikaans            Tag = Tag{lang: _af}                // af
	Amharic              Tag = Tag{lang: _am}                // am
	Arabic               Tag = Tag{lang: _ar}                // ar
	ModernStandardArabic Tag = Tag{lang: _ar, region: _001}  // ar-001
	Azerbaijani          Tag = Tag{lang: _az}                // az
	Bulgarian            Tag = Tag{lang: _bg}                // bg
	Bengali              Tag = Tag{lang: _bn}                // bn
	Catalan              Tag = Tag{lang: _ca}                // ca
	Czech                Tag = Tag{lang: _cs}                // cs
	Danish               Tag = Tag{lang: _da}                // da
	German               Tag = Tag{lang: _de}                // de
	Greek                Tag = Tag{lang: _el}                // el
	English              Tag = Tag{lang: _en}                // en
	AmericanEnglish      Tag = Tag{lang: _en, region: _US}   // en-US
	BritishEnglish       Tag = Tag{lang: _en, region: _GB}   // en-GB
	Spanish              Tag = Tag{lang: _es}                // es
	EuropeanSpanish      Tag = Tag{lang: _es, region: _ES}   // es-ES
	LatinAmericanSpanish Tag = Tag{lang: _es, region: _419}  // es-419
	Estonian             Tag = Tag{lang: _et}                // et
	Persian              Tag = Tag{lang: _fa}                // fa
	Finnish              Tag = Tag{lang: _fi}                // fi
	Filipino             Tag = Tag{lang: _fil}               // fil
	French               Tag = Tag{lang: _fr}                // fr
	CanadianFrench       Tag = Tag{lang: _fr, region: _CA}   // fr-CA
	Gujarati             Tag = Tag{lang: _gu}                // gu
	Hebrew               Tag = Tag{lang: _he}                // he
	Hindi                Tag = Tag{lang: _hi}                // hi
	Croatian             Tag = Tag{lang: _hr}                // hr
	Hungarian            Tag = Tag{lang: _hu}                // hu
	Armenian             Tag = Tag{lang: _hy}                // hy
	Indonesian           Tag = Tag{lang: _id}                // id
	Icelandic            Tag = Tag{lang: _is}                // is
	Italian              Tag = Tag{lang: _it}                // it
	Japanese             Tag = Tag{lang: _ja}                // ja
	Georgian             Tag = Tag{lang: _ka}                // ka
	Kazakh               Tag = Tag{lang: _kk}                // kk
	Khmer                Tag = Tag{lang: _km}                // km
	Kannada              Tag = Tag{lang: _kn}                // kn
	Korean               Tag = Tag{lang: _ko}                // ko
	Kirghiz              Tag = Tag{lang: _ky}                // ky
	Lao                  Tag = Tag{lang: _lo}                // lo
	Lithuanian           Tag = Tag{lang: _lt}                // lt
	Latvian              Tag = Tag{lang: _lv}                // lv
	Macedonian           Tag = Tag{lang: _mk}                // mk
	Malayalam            Tag = Tag{lang: _ml}                // ml
	Mongolian            Tag = Tag{lang: _mn}                // mn
	Marathi              Tag = Tag{lang: _mr}                // mr
	Malay                Tag = Tag{lang: _ms}                // ms
	Burmese              Tag = Tag{lang: _my}                // my
	Nepali               Tag = Tag{lang: _ne}                // ne
	Dutch                Tag = Tag{lang: _nl}                // nl
	Norwegian            Tag = Tag{lang: _no}                // no
	Punjabi              Tag = Tag{lang: _pa}                // pa
	Polish               Tag = Tag{lang: _pl}                // pl
	Portuguese           Tag = Tag{lang: _pt}                // pt
	BrazilianPortuguese  Tag = Tag{lang: _pt, region: _BR}   // pt-BR
	EuropeanPortuguese   Tag = Tag{lang: _pt, region: _PT}   // pt-PT
	Romanian             Tag = Tag{lang: _ro}                // ro
	Russian              Tag = Tag{lang: _ru}                // ru
	Sinhala              Tag = Tag{lang: _si}                // si
	Slovak               Tag = Tag{lang: _sk}                // sk
	Slovenian            Tag = Tag{lang: _sl}                // sl
	Albanian             Tag = Tag{lang: _sq}                // sq
	Serbian              Tag = Tag{lang: _sr}                // sr
	SerbianLatin         Tag = Tag{lang: _sr, script: _Latn} // sr-Latn
	Swedish              Tag = Tag{lang: _sv}                // sv
	Swahili              Tag = Tag{lang: _sw}                // sw
	Tamil                Tag = Tag{lang: _ta}                // ta
	Telugu               Tag = Tag{lang: _te}                // te
	Thai                 Tag = Tag{lang: _th}                // th
	Turkish              Tag = Tag{lang: _tr}                // tr
	Ukrainian            Tag = Tag{lang: _uk}                // uk
	Urdu                 Tag = Tag{lang: _ur}                // ur
	Uzbek                Tag = Tag{lang: _uz}                // uz
	Vietnamese           Tag = Tag{lang: _vi}                // vi
	Chinese              Tag = Tag{lang: _zh}                // zh
	SimplifiedChinese    Tag = Tag{lang: _zh, script: _Hans} // zh-Hans
	TraditionalChinese   Tag = Tag{lang: _zh, script: _Hant} // zh-Hant
	Zulu                 Tag = Tag{lang: _zu}                // zu
)
|
389
vendor/golang.org/x/text/language/testdata/CLDRLocaleMatcherTest.txt
generated
vendored
Normal file
389
vendor/golang.org/x/text/language/testdata/CLDRLocaleMatcherTest.txt
generated
vendored
Normal file
@ -0,0 +1,389 @@
|
||||
# TODO: this file has not yet been included in the main CLDR release.
|
||||
# The intent is to verify this file against the Go implementation and then
|
||||
# correct the cases and merge in other interesting test cases.
|
||||
# See TestCLDRCompliance in match_test.go, as well as the list of exceptions
|
||||
# defined in the map skip below it, for the work in progress.
|
||||
|
||||
# Data-driven test for the XLocaleMatcher.
|
||||
# Format
|
||||
# • Everything after "#" is a comment
|
||||
# • Arguments are separated by ";". They are:
|
||||
|
||||
# supported ; desired ; expected
|
||||
|
||||
# • The supported may have the threshold distance reset as a first item, eg 50, en, fr
|
||||
# A line starting with @debug will reach a statement in the test code where you can put a breakpoint for debugging
|
||||
# The test code also supports reformatting this file, by setting the REFORMAT flag.
|
||||
|
||||
##################################################
|
||||
# testParentLocales
|
||||
|
||||
# es-419, es-AR, and es-MX are in a cluster; es is in a different one
|
||||
|
||||
es-419, es-ES ; es-AR ; es-419
|
||||
es-ES, es-419 ; es-AR ; es-419
|
||||
|
||||
es-419, es ; es-AR ; es-419
|
||||
es, es-419 ; es-AR ; es-419
|
||||
|
||||
es-MX, es ; es-AR ; es-MX
|
||||
es, es-MX ; es-AR ; es-MX
|
||||
|
||||
# en-GB, en-AU, and en-NZ are in a cluster; en in a different one
|
||||
|
||||
en-GB, en-US ; en-AU ; en-GB
|
||||
en-US, en-GB ; en-AU ; en-GB
|
||||
|
||||
en-GB, en ; en-AU ; en-GB
|
||||
en, en-GB ; en-AU ; en-GB
|
||||
|
||||
en-NZ, en-US ; en-AU ; en-NZ
|
||||
en-US, en-NZ ; en-AU ; en-NZ
|
||||
|
||||
en-NZ, en ; en-AU ; en-NZ
|
||||
en, en-NZ ; en-AU ; en-NZ
|
||||
|
||||
# pt-AU and pt-PT in one cluster; pt-BR in another
|
||||
|
||||
pt-PT, pt-BR ; pt-AO ; pt-PT
|
||||
pt-BR, pt-PT ; pt-AO ; pt-PT
|
||||
|
||||
pt-PT, pt ; pt-AO ; pt-PT
|
||||
pt, pt-PT ; pt-AO ; pt-PT
|
||||
|
||||
zh-MO, zh-TW ; zh-HK ; zh-MO
|
||||
zh-TW, zh-MO ; zh-HK ; zh-MO
|
||||
|
||||
zh-MO, zh-TW ; zh-HK ; zh-MO
|
||||
zh-TW, zh-MO ; zh-HK ; zh-MO
|
||||
|
||||
zh-MO, zh-CN ; zh-HK ; zh-MO
|
||||
zh-CN, zh-MO ; zh-HK ; zh-MO
|
||||
|
||||
zh-MO, zh ; zh-HK ; zh-MO
|
||||
zh, zh-MO ; zh-HK ; zh-MO
|
||||
|
||||
##################################################
|
||||
# testChinese
|
||||
|
||||
zh-CN, zh-TW, iw ; zh-Hant-TW ; zh-TW
|
||||
zh-CN, zh-TW, iw ; zh-Hant ; zh-TW
|
||||
zh-CN, zh-TW, iw ; zh-TW ; zh-TW
|
||||
zh-CN, zh-TW, iw ; zh-Hans-CN ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh-CN ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh ; zh-CN
|
||||
|
||||
##################################################
|
||||
# testenGB
|
||||
|
||||
fr, en, en-GB, es-419, es-MX, es ; en-NZ ; en-GB
|
||||
fr, en, en-GB, es-419, es-MX, es ; es-ES ; es
|
||||
fr, en, en-GB, es-419, es-MX, es ; es-AR ; es-419
|
||||
fr, en, en-GB, es-419, es-MX, es ; es-MX ; es-MX
|
||||
|
||||
##################################################
|
||||
# testFallbacks
|
||||
|
||||
91, en, hi ; sa ; hi
|
||||
|
||||
##################################################
|
||||
# testBasics
|
||||
|
||||
fr, en-GB, en ; en-GB ; en-GB
|
||||
fr, en-GB, en ; en ; en
|
||||
fr, en-GB, en ; fr ; fr
|
||||
fr, en-GB, en ; ja ; fr # return first if no match
|
||||
|
||||
##################################################
|
||||
# testFallback
|
||||
|
||||
# check that script fallbacks are handled right
|
||||
|
||||
zh-CN, zh-TW, iw ; zh-Hant ; zh-TW
|
||||
zh-CN, zh-TW, iw ; zh ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh-Hans-CN ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh-Hant-HK ; zh-TW
|
||||
zh-CN, zh-TW, iw ; he-IT ; iw
|
||||
|
||||
##################################################
|
||||
# testSpecials
|
||||
|
||||
# check that nearby languages are handled
|
||||
|
||||
en, fil, ro, nn ; tl ; fil
|
||||
en, fil, ro, nn ; mo ; ro
|
||||
en, fil, ro, nn ; nb ; nn
|
||||
|
||||
# make sure default works
|
||||
|
||||
en, fil, ro, nn ; ja ; en
|
||||
|
||||
##################################################
|
||||
# testRegionalSpecials
|
||||
|
||||
# verify that en-AU is closer to en-GB than to en (which is en-US)
|
||||
|
||||
en, en-GB, es, es-419 ; es-MX ; es-419
|
||||
en, en-GB, es, es-419 ; en-AU ; en-GB
|
||||
en, en-GB, es, es-419 ; es-ES ; es
|
||||
|
||||
##################################################
|
||||
# testHK
|
||||
|
||||
# HK and MO are closer to each other for Hant than to TW
|
||||
|
||||
zh, zh-TW, zh-MO ; zh-HK ; zh-MO
|
||||
zh, zh-TW, zh-HK ; zh-MO ; zh-HK
|
||||
|
||||
##################################################
|
||||
# testMatch-exact
|
||||
|
||||
# see localeDistance.txt
|
||||
|
||||
##################################################
|
||||
# testMatch-none
|
||||
|
||||
# see localeDistance.txt
|
||||
|
||||
##################################################
|
||||
# testMatch-matchOnMaximized
|
||||
|
||||
zh, zh-Hant ; und-TW ; zh-Hant # und-TW should be closer to zh-Hant than to zh
|
||||
en-Hant-TW, und-TW ; zh-Hant ; und-TW # zh-Hant should be closer to und-TW than to en-Hant-TW
|
||||
en-Hant-TW, und-TW ; zh ; und-TW # zh should be closer to und-TW than to en-Hant-TW
|
||||
|
||||
##################################################
|
||||
# testMatchGrandfatheredCode
|
||||
|
||||
fr, i-klingon, en-Latn-US ; en-GB-oed ; en-Latn-US
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-exactMatch
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja, de ; ja
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-simpleVariantMatch
|
||||
fr, en-GB, ja, es-ES, es-MX ; de, en-US ; en-GB # Intentionally avoiding a perfect-match or two candidates for variant matches.
|
||||
|
||||
# Fallback.
|
||||
|
||||
fr, en-GB, ja, es-ES, es-MX ; de, zh ; fr
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-matchOnMaximized
|
||||
# Check that if the preference is maximized already, it works as well.
|
||||
|
||||
en, ja ; ja-Jpan-JP, en-AU ; ja # Match for ja-Jpan-JP (maximized already)
|
||||
|
||||
# ja-JP matches ja on likely subtags, and it's listed first, thus it wins over the second preference en-GB.
|
||||
|
||||
en, ja ; ja-JP, en-US ; ja # Match for ja-Jpan-JP (maximized already)
|
||||
|
||||
# Check that if the preference is maximized already, it works as well.
|
||||
|
||||
en, ja ; ja-Jpan-JP, en-US ; ja # Match for ja-Jpan-JP (maximized already)
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-noMatchOnMaximized
|
||||
# Regression test for http://b/5714572 .
|
||||
# de maximizes to de-DE. Pick the exact match for the secondary language instead.
|
||||
en, de, fr, ja ; de-CH, fr ; de
|
||||
|
||||
##################################################
|
||||
# testBestMatchForTraditionalChinese
|
||||
|
||||
# Scenario: An application that only supports Simplified Chinese (and some other languages),
|
||||
# but does not support Traditional Chinese. zh-Hans-CN could be replaced with zh-CN, zh, or
|
||||
# zh-Hans, it wouldn't make much of a difference.
|
||||
|
||||
# The script distance (simplified vs. traditional Han) is considered small enough
|
||||
# to be an acceptable match. The regional difference is considered almost insignificant.
|
||||
|
||||
fr, zh-Hans-CN, en-US ; zh-TW ; zh-Hans-CN
|
||||
fr, zh-Hans-CN, en-US ; zh-Hant ; zh-Hans-CN
|
||||
|
||||
# For geo-political reasons, you might want to avoid a zh-Hant -> zh-Hans match.
|
||||
# In this case, if zh-TW, zh-HK or a tag starting with zh-Hant is requested, you can
|
||||
# change your call to getBestMatch to include a 2nd language preference.
|
||||
# "en" is a better match since its distance to "en-US" is closer than the distance
|
||||
# from "zh-TW" to "zh-CN" (script distance).
|
||||
|
||||
fr, zh-Hans-CN, en-US ; zh-TW, en ; en-US
|
||||
fr, zh-Hans-CN, en-US ; zh-Hant-CN, en, en ; en-US
|
||||
fr, zh-Hans-CN, en-US ; zh-Hans, en ; zh-Hans-CN
|
||||
|
||||
##################################################
|
||||
# testUndefined
|
||||
# When the undefined language doesn't match anything in the list,
|
||||
# getBestMatch returns the default, as usual.
|
||||
|
||||
it, fr ; und ; it
|
||||
|
||||
# When it *does* occur in the list, bestMatch returns it, as expected.
|
||||
it, und ; und ; und
|
||||
|
||||
# The unusual part: max("und") = "en-Latn-US", and since matching is based on maximized
|
||||
# tags, the undefined language would normally match English. But that would produce the
|
||||
# counterintuitive results that getBestMatch("und", XLocaleMatcher("it,en")) would be "en", and
|
||||
# getBestMatch("en", XLocaleMatcher("it,und")) would be "und".
|
||||
|
||||
# To avoid that, we change the matcher's definitions of max
|
||||
# so that max("und")="und". That produces the following, more desirable
|
||||
# results:
|
||||
|
||||
it, en ; und ; it
|
||||
it, und ; en ; it
|
||||
|
||||
##################################################
|
||||
# testGetBestMatch-regionDistance
|
||||
|
||||
es-AR, es ; es-MX ; es-AR
|
||||
fr, en, en-GB ; en-CA ; en-GB
|
||||
de-AT, de-DE, de-CH ; de ; de-DE
|
||||
|
||||
##################################################
|
||||
# testAsymmetry
|
||||
|
||||
mul, nl ; af ; nl # af => nl
|
||||
mul, af ; nl ; mul # but nl !=> af
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-matchOnMaximized2
|
||||
|
||||
# ja-JP matches ja on likely subtags, and it's listed first, thus it wins over the second preference en-GB.
|
||||
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja-JP, en-GB ; ja # Match for ja-JP, with likely region subtag
|
||||
|
||||
# Check that if the preference is maximized already, it works as well.
|
||||
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja-Jpan-JP, en-GB ; ja # Match for ja-Jpan-JP (maximized already)
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForList-closeEnoughMatchOnMaximized
|
||||
|
||||
en-GB, en, de, fr, ja ; de-CH, fr ; de
|
||||
en-GB, en, de, fr, ja ; en-US, ar, nl, de, ja ; en
|
||||
|
||||
##################################################
|
||||
# testGetBestMatchForPortuguese
|
||||
|
||||
# pt might be supported and not pt-PT
|
||||
|
||||
# European user who prefers Spanish over Brazilian Portuguese as a fallback.
|
||||
|
||||
pt-PT, pt-BR, es, es-419 ; pt-PT, es, pt ; pt-PT
|
||||
pt-PT, pt, es, es-419 ; pt-PT, es, pt ; pt-PT # pt implicit
|
||||
|
||||
# Brazilian user who prefers South American Spanish over European Portuguese as a fallback.
|
||||
# The asymmetry between this case and above is because it's "pt-PT" that's missing between the
|
||||
# matchers as "pt-BR" is a much more common language.
|
||||
|
||||
pt-PT, pt-BR, es, es-419 ; pt, es-419, pt-PT ; pt-BR
|
||||
pt-PT, pt-BR, es, es-419 ; pt-PT, es, pt ; pt-PT
|
||||
pt-PT, pt, es, es-419 ; pt-PT, es, pt ; pt-PT
|
||||
pt-PT, pt, es, es-419 ; pt, es-419, pt-PT ; pt
|
||||
|
||||
pt-BR, es, es-419 ; pt, es-419, pt-PT ; pt-BR
|
||||
|
||||
# Code that adds the user's country can get "pt-US" for a user's language.
|
||||
# That should fall back to "pt-BR".
|
||||
|
||||
pt-PT, pt-BR, es, es-419 ; pt-US, pt-PT ; pt-BR
|
||||
pt-PT, pt, es, es-419 ; pt-US, pt-PT, pt ; pt # pt-BR implicit
|
||||
|
||||
##################################################
|
||||
# testVariantWithScriptMatch 1 and 2
|
||||
|
||||
fr, en, sv ; en-GB ; en
|
||||
fr, en, sv ; en-GB ; en
|
||||
en, sv ; en-GB, sv ; en
|
||||
|
||||
##################################################
|
||||
# testLongLists
|
||||
|
||||
en, sv ; sv ; sv
|
||||
af, am, ar, az, be, bg, bn, bs, ca, cs, cy, cy, da, de, el, en, en-GB, es, es-419, et, eu, fa, fi, fil, fr, ga, gl, gu, hi, hr, hu, hy, id, is, it, iw, ja, ka, kk, km, kn, ko, ky, lo, lt, lv, mk, ml, mn, mr, ms, my, ne, nl, no, pa, pl, pt, pt-PT, ro, ru, si, sk, sl, sq, sr, sr-Latn, sv, sw, ta, te, th, tr, uk, ur, uz, vi, zh-CN, zh-TW, zu ; sv ; sv
|
||||
af, af-NA, af-ZA, agq, agq-CM, ak, ak-GH, am, am-ET, ar, ar-001, ar-AE, ar-BH, ar-DJ, ar-DZ, ar-EG, ar-EH, ar-ER, ar-IL, ar-IQ, ar-JO, ar-KM, ar-KW, ar-LB, ar-LY, ar-MA, ar-MR, ar-OM, ar-PS, ar-QA, ar-SA, ar-SD, ar-SO, ar-SS, ar-SY, ar-TD, ar-TN, ar-YE, as, as-IN, asa, asa-TZ, ast, ast-ES, az, az-Cyrl, az-Cyrl-AZ, az-Latn, az-Latn-AZ, bas, bas-CM, be, be-BY, bem, bem-ZM, bez, bez-TZ, bg, bg-BG, bm, bm-ML, bn, bn-BD, bn-IN, bo, bo-CN, bo-IN, br, br-FR, brx, brx-IN, bs, bs-Cyrl, bs-Cyrl-BA, bs-Latn, bs-Latn-BA, ca, ca-AD, ca-ES, ca-ES-VALENCIA, ca-FR, ca-IT, ce, ce-RU, cgg, cgg-UG, chr, chr-US, ckb, ckb-IQ, ckb-IR, cs, cs-CZ, cu, cu-RU, cy, cy-GB, da, da-DK, da-GL, dav, dav-KE, de, de-AT, de-BE, de-CH, de-DE, de-LI, de-LU, dje, dje-NE, dsb, dsb-DE, dua, dua-CM, dyo, dyo-SN, dz, dz-BT, ebu, ebu-KE, ee, ee-GH, ee-TG, el, el-CY, el-GR, en, en-001, en-150, en-AG, en-AI, en-AS, en-AT, en-AU, en-BB, en-BE, en-BI, en-BM, en-BS, en-BW, en-BZ, en-CA, en-CC, en-CH, en-CK, en-CM, en-CX, en-CY, en-DE, en-DG, en-DK, en-DM, en-ER, en-FI, en-FJ, en-FK, en-FM, en-GB, en-GD, en-GG, en-GH, en-GI, en-GM, en-GU, en-GY, en-HK, en-IE, en-IL, en-IM, en-IN, en-IO, en-JE, en-JM, en-KE, en-KI, en-KN, en-KY, en-LC, en-LR, en-LS, en-MG, en-MH, en-MO, en-MP, en-MS, en-MT, en-MU, en-MW, en-MY, en-NA, en-NF, en-NG, en-NL, en-NR, en-NU, en-NZ, en-PG, en-PH, en-PK, en-PN, en-PR, en-PW, en-RW, en-SB, en-SC, en-SD, en-SE, en-SG, en-SH, en-SI, en-SL, en-SS, en-SX, en-SZ, en-TC, en-TK, en-TO, en-TT, en-TV, en-TZ, en-UG, en-UM, en-US, en-US-POSIX, en-VC, en-VG, en-VI, en-VU, en-WS, en-ZA, en-ZM, en-ZW, eo, eo-001, es, es-419, es-AR, es-BO, es-CL, es-CO, es-CR, es-CU, es-DO, es-EA, es-EC, es-ES, es-GQ, es-GT, es-HN, es-IC, es-MX, es-NI, es-PA, es-PE, es-PH, es-PR, es-PY, es-SV, es-US, es-UY, es-VE, et, et-EE, eu, eu-ES, ewo, ewo-CM, fa, fa-AF, fa-IR, ff, ff-CM, ff-GN, ff-MR, ff-SN, fi, fi-FI, fil, fil-PH, fo, fo-DK, fo-FO, fr, fr-BE, fr-BF, fr-BI, fr-BJ, fr-BL, fr-CA, fr-CD, fr-CF, fr-CG, fr-CH, fr-CI, 
fr-CM, fr-DJ, fr-DZ, fr-FR, fr-GA, fr-GF, fr-GN, fr-GP, fr-GQ, fr-HT, fr-KM, fr-LU, fr-MA, fr-MC, fr-MF, fr-MG, fr-ML, fr-MQ, fr-MR, fr-MU, fr-NC, fr-NE, fr-PF, fr-PM, fr-RE, fr-RW, fr-SC, fr-SN, fr-SY, fr-TD, fr-TG, fr-TN, fr-VU, fr-WF, fr-YT, fur, fur-IT, fy, fy-NL, ga, ga-IE, gd, gd-GB, gl, gl-ES, gsw, gsw-CH, gsw-FR, gsw-LI, gu, gu-IN, guz, guz-KE, gv, gv-IM, ha, ha-GH, ha-NE, ha-NG, haw, haw-US, he, he-IL, hi, hi-IN, hr, hr-BA, hr-HR, hsb, hsb-DE, hu, hu-HU, hy, hy-AM, id, id-ID, ig, ig-NG, ii, ii-CN, is, is-IS, it, it-CH, it-IT, it-SM, ja, ja-JP, jgo, jgo-CM, jmc, jmc-TZ, ka, ka-GE, kab, kab-DZ, kam, kam-KE, kde, kde-TZ, kea, kea-CV, khq, khq-ML, ki, ki-KE, kk, kk-KZ, kkj, kkj-CM, kl, kl-GL, kln, kln-KE, km, km-KH, kn, kn-IN, ko, ko-KP, ko-KR, kok, kok-IN, ks, ks-IN, ksb, ksb-TZ, ksf, ksf-CM, ksh, ksh-DE, kw, kw-GB, ky, ky-KG, lag, lag-TZ, lb, lb-LU, lg, lg-UG, lkt, lkt-US, ln, ln-AO, ln-CD, ln-CF, ln-CG, lo, lo-LA, lrc, lrc-IQ, lrc-IR, lt, lt-LT, lu, lu-CD, luo, luo-KE, luy, luy-KE, lv, lv-LV, mas, mas-KE, mas-TZ, mer, mer-KE, mfe, mfe-MU, mg, mg-MG, mgh, mgh-MZ, mgo, mgo-CM, mk, mk-MK, ml, ml-IN, mn, mn-MN, mr, mr-IN, ms, ms-BN, ms-MY, ms-SG, mt, mt-MT, mua, mua-CM, my, my-MM, mzn, mzn-IR, naq, naq-NA, nb, nb-NO, nb-SJ, nd, nd-ZW, ne, ne-IN, ne-NP, nl, nl-AW, nl-BE, nl-BQ, nl-CW, nl-NL, nl-SR, nl-SX, nmg, nmg-CM, nn, nn-NO, nnh, nnh-CM, nus, nus-SS, nyn, nyn-UG, om, om-ET, om-KE, or, or-IN, os, os-GE, os-RU, pa, pa-Arab, pa-Arab-PK, pa-Guru, pa-Guru-IN, pl, pl-PL, prg, prg-001, ps, ps-AF, pt, pt-AO, pt-BR, pt-CV, pt-GW, pt-MO, pt-MZ, pt-PT, pt-ST, pt-TL, qu, qu-BO, qu-EC, qu-PE, rm, rm-CH, rn, rn-BI, ro, ro-MD, ro-RO, rof, rof-TZ, root, ru, ru-BY, ru-KG, ru-KZ, ru-MD, ru-RU, ru-UA, rw, rw-RW, rwk, rwk-TZ, sah, sah-RU, saq, saq-KE, sbp, sbp-TZ, se, se-FI, se-NO, se-SE, seh, seh-MZ, ses, ses-ML, sg, sg-CF, shi, shi-Latn, shi-Latn-MA, shi-Tfng, shi-Tfng-MA, si, si-LK, sk, sk-SK, sl, sl-SI, smn, smn-FI, sn, sn-ZW, so, so-DJ, so-ET, so-KE, so-SO, sq, sq-AL, 
sq-MK, sq-XK, sr, sr-Cyrl, sr-Cyrl-BA, sr-Cyrl-ME, sr-Cyrl-RS, sr-Cyrl-XK, sr-Latn, sr-Latn-BA, sr-Latn-ME, sr-Latn-RS, sr-Latn-XK, sv, sv-AX, sv-FI, sv-SE, sw, sw-CD, sw-KE, sw-TZ, sw-UG, ta, ta-IN, ta-LK, ta-MY, ta-SG, te, te-IN, teo, teo-KE, teo-UG, th, th-TH, ti, ti-ER, ti-ET, tk, tk-TM, to, to-TO, tr, tr-CY, tr-TR, twq, twq-NE, tzm, tzm-MA, ug, ug-CN, uk, uk-UA, ur, ur-IN, ur-PK, uz, uz-Arab, uz-Arab-AF, uz-Cyrl, uz-Cyrl-UZ, uz-Latn, uz-Latn-UZ, vai, vai-Latn, vai-Latn-LR, vai-Vaii, vai-Vaii-LR, vi, vi-VN, vo, vo-001, vun, vun-TZ, wae, wae-CH, xog, xog-UG, yav, yav-CM, yi, yi-001, yo, yo-BJ, yo-NG, zgh, zgh-MA, zh, zh-Hans, zh-Hans-CN, zh-Hans-HK, zh-Hans-MO, zh-Hans-SG, zh-Hant, zh-Hant-HK, zh-Hant-MO, zh-Hant-TW, zu, zu-ZA ; sv ; sv
|
||||
|
||||
##################################################
|
||||
# test8288
|
||||
|
||||
it, en ; und ; it
|
||||
it, en ; und, en ; en
|
||||
|
||||
# examples from
|
||||
# http://unicode.org/repos/cldr/tags/latest/common/bcp47/
|
||||
# http://unicode.org/repos/cldr/tags/latest/common/validity/variant.xml
|
||||
|
||||
##################################################
|
||||
# testUnHack
|
||||
|
||||
en-NZ, en-IT ; en-US ; en-NZ
|
||||
|
||||
##################################################
|
||||
# testEmptySupported => null
|
||||
; en ; null
|
||||
|
||||
##################################################
|
||||
# testVariantsAndExtensions
|
||||
##################################################
|
||||
# tests the .combine() method
|
||||
|
||||
und, fr ; fr-BE-fonipa ; fr ; fr-BE-fonipa
|
||||
und, fr-CA ; fr-BE-fonipa ; fr-CA ; fr-BE-fonipa
|
||||
und, fr-fonupa ; fr-BE-fonipa ; fr-fonupa ; fr-BE-fonipa
|
||||
und, no ; nn-BE-fonipa ; no ; no-BE-fonipa
|
||||
und, en-GB-u-sd-gbsct ; en-fonipa-u-nu-Arab-ca-buddhist-t-m0-iso-i0-pinyin ; en-GB-u-sd-gbsct ; en-GB-fonipa-u-nu-Arab-ca-buddhist-t-m0-iso-i0-pinyin
|
||||
|
||||
en-PSCRACK, de-PSCRACK, fr-PSCRACK, pt-PT-PSCRACK ; fr-PSCRACK ; fr-PSCRACK
|
||||
en-PSCRACK, de-PSCRACK, fr-PSCRACK, pt-PT-PSCRACK ; fr ; fr-PSCRACK
|
||||
en-PSCRACK, de-PSCRACK, fr-PSCRACK, pt-PT-PSCRACK ; de-CH ; de-PSCRACK
|
||||
|
||||
##################################################
|
||||
# testClusters
|
||||
# we favor es-419 over others in cluster. Clusters: es- {ES, MA, EA} {419, AR, MX}
|
||||
|
||||
und, es, es-MA, es-MX, es-419 ; es-AR ; es-419
|
||||
und, es-MA, es, es-419, es-MX ; es-AR ; es-419
|
||||
und, es, es-MA, es-MX, es-419 ; es-EA ; es
|
||||
und, es-MA, es, es-419, es-MX ; es-EA ; es
|
||||
|
||||
# of course, fall back to within cluster
|
||||
|
||||
und, es, es-MA, es-MX ; es-AR ; es-MX
|
||||
und, es-MA, es, es-MX ; es-AR ; es-MX
|
||||
und, es-MA, es-MX, es-419 ; es-EA ; es-MA
|
||||
und, es-MA, es-419, es-MX ; es-EA ; es-MA
|
||||
|
||||
# we favor es-GB over others in cluster. Clusters: en- {US, GU, VI} {GB, IN, ZA}
|
||||
|
||||
und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB
|
||||
und, en-GU, en, en-GB, en-IN ; en-ZA ; en-GB
|
||||
und, en, en-GU, en-IN, en-GB ; en-VI ; en
|
||||
und, en-GU, en, en-GB, en-IN ; en-VI ; en
|
||||
|
||||
# of course, fall back to within cluster
|
||||
|
||||
und, en, en-GU, en-IN ; en-ZA ; en-IN
|
||||
und, en-GU, en, en-IN ; en-ZA ; en-IN
|
||||
und, en-GU, en-IN, en-GB ; en-VI ; en-GU
|
||||
und, en-GU, en-GB, en-IN ; en-VI ; en-GU
|
||||
|
||||
##################################################
|
||||
# testThreshold
|
||||
@Threshold=60
|
||||
|
||||
50, und, fr-CA-fonupa ; fr-BE-fonipa ; fr-CA-fonupa ; fr-BE-fonipa
|
||||
50, und, fr-Cyrl-CA-fonupa ; fr-BE-fonipa ; fr-Cyrl-CA-fonupa ; fr-Cyrl-BE-fonipa
|
||||
|
||||
@Threshold=-1 # restore
|
||||
|
||||
##################################################
|
||||
# testScriptFirst
|
||||
@DistanceOption=SCRIPT_FIRST
|
||||
@debug
|
||||
|
||||
ru, fr ; zh, pl ; fr
|
||||
ru, fr ; zh-Cyrl, pl ; ru
|
||||
hr, en-Cyrl; sr ; en-Cyrl
|
||||
da, ru, hr; sr ; ru
|
226
vendor/golang.org/x/text/language/testdata/GoLocaleMatcherTest.txt
generated
vendored
Normal file
226
vendor/golang.org/x/text/language/testdata/GoLocaleMatcherTest.txt
generated
vendored
Normal file
@ -0,0 +1,226 @@
|
||||
# basics
|
||||
fr, en-GB, en ; en-GB ; en-GB
|
||||
fr, en-GB, en ; en-US ; en
|
||||
fr, en-GB, en ; fr-FR ; fr
|
||||
fr, en-GB, en ; ja-JP ; fr
|
||||
|
||||
# script fallbacks
|
||||
zh-CN, zh-TW, iw ; zh-Hant ; zh-TW
|
||||
zh-CN, zh-TW, iw ; zh ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh-Hans-CN ; zh-CN
|
||||
zh-CN, zh-TW, iw ; zh-Hant-HK ; zh-TW
|
||||
zh-CN, zh-TW, iw ; he-IT ; iw ; iw
|
||||
|
||||
# language-specific script fallbacks 1
|
||||
en, sr, nl ; sr-Latn ; sr
|
||||
en, sr, nl ; sh ; sr # different script, but seems okay and is as CLDR suggests
|
||||
en, sr, nl ; hr ; en
|
||||
en, sr, nl ; bs ; en
|
||||
en, sr, nl ; nl-Cyrl ; sr
|
||||
|
||||
# language-specific script fallbacks 2
|
||||
en, sh ; sr ; sh
|
||||
en, sh ; sr-Cyrl ; sh
|
||||
en, sh ; hr ; sh
|
||||
|
||||
# don't match hr to sr-Latn
|
||||
en, sr-Latn ; hr ; en
|
||||
|
||||
# both deprecated and not
|
||||
fil, tl, iw, he ; he-IT ; he
|
||||
fil, tl, iw, he ; he ; he
|
||||
fil, tl, iw, he ; iw ; iw
|
||||
fil, tl, iw, he ; fil-IT ; fil
|
||||
fil, tl, iw, he ; fil ; fil
|
||||
fil, tl, iw, he ; tl ; tl
|
||||
|
||||
# nearby languages
|
||||
en, fil, ro, nn ; tl ; fil
|
||||
en, fil, ro, nn ; mo ; ro
|
||||
en, fil, ro, nn ; nb ; nn
|
||||
en, fil, ro, nn ; ja ; en
|
||||
|
||||
# nearby languages: Nynorsk to Bokmål
|
||||
en, nb ; nn ; nb
|
||||
|
||||
# nearby languages: Danish does not match nn
|
||||
en, nn ; da ; en
|
||||
|
||||
# nearby languages: Danish matches no
|
||||
en, no ; da ; no
|
||||
|
||||
# nearby languages: Danish matches nb
|
||||
en, nb ; da ; nb
|
||||
|
||||
# prefer matching languages over language variants.
|
||||
nn, en-GB ; no, en-US ; en-GB
|
||||
nn, en-GB ; nb, en-US ; en-GB
|
||||
|
||||
# deprecated version is closer than same language with other differences
|
||||
nl, he, en-GB ; iw, en-US ; he
|
||||
|
||||
# macro equivalent is closer than same language with other differences
|
||||
nl, zh, en-GB, no ; cmn, en-US ; zh
|
||||
nl, zh, en-GB, no ; nb, en-US ; no
|
||||
|
||||
# legacy equivalent is closer than same language with other differences
|
||||
nl, fil, en-GB ; tl, en-US ; fil
|
||||
|
||||
# distinguish near equivalents
|
||||
en, ro, mo, ro-MD ; ro ; ro
|
||||
en, ro, mo, ro-MD ; mo ; mo
|
||||
en, ro, mo, ro-MD ; ro-MD ; ro-MD
|
||||
|
||||
# maximization of legacy
|
||||
sr-Cyrl, sr-Latn, ro, ro-MD ; sh ; sr-Latn
|
||||
sr-Cyrl, sr-Latn, ro, ro-MD ; mo ; ro-MD
|
||||
|
||||
# empty
|
||||
; fr ; und
|
||||
; en ; und
|
||||
|
||||
# private use subtags
|
||||
fr, en-GB, x-bork, es-ES, es-419 ; x-piglatin ; fr
|
||||
fr, en-GB, x-bork, es-ES, es-419 ; x-bork ; x-bork
|
||||
|
||||
# grandfathered codes
|
||||
fr, i-klingon, en-Latn-US ; en-GB-oed ; en-Latn-US
|
||||
fr, i-klingon, en-Latn-US ; i-klingon ; tlh
|
||||
|
||||
|
||||
# simple variant match
|
||||
fr, en-GB, ja, es-ES, es-MX ; de, en-US ; en-GB
|
||||
fr, en-GB, ja, es-ES, es-MX ; de, zh ; fr
|
||||
|
||||
# best match for traditional Chinese
|
||||
fr, zh-Hans-CN, en-US ; zh-TW ; zh-Hans-CN
|
||||
fr, zh-Hans-CN, en-US ; zh-Hant ; zh-Hans-CN
|
||||
fr, zh-Hans-CN, en-US ; zh-TW, en ; en-US
|
||||
fr, zh-Hans-CN, en-US ; zh-Hant-CN, en ; en-US
|
||||
fr, zh-Hans-CN, en-US ; zh-Hans, en ; zh-Hans-CN
|
||||
|
||||
# more specific script should win in case regions are identical
|
||||
af, af-Latn, af-Arab ; af ; af
|
||||
af, af-Latn, af-Arab ; af-ZA ; af
|
||||
af, af-Latn, af-Arab ; af-Latn-ZA ; af-Latn
|
||||
af, af-Latn, af-Arab ; af-Latn ; af-Latn
|
||||
|
||||
# more specific region should win
|
||||
nl, nl-NL, nl-BE ; nl ; nl
|
||||
nl, nl-NL, nl-BE ; nl-Latn ; nl
|
||||
nl, nl-NL, nl-BE ; nl-Latn-NL ; nl-NL
|
||||
nl, nl-NL, nl-BE ; nl-NL ; nl-NL
|
||||
|
||||
# region may replace matched if matched is enclosing
|
||||
es-419,es ; es-MX ; es-419 ; es-MX
|
||||
es-419,es ; es-SG ; es
|
||||
|
||||
# more specific region wins over more specific script
|
||||
nl, nl-Latn, nl-NL, nl-BE ; nl ; nl
|
||||
nl, nl-Latn, nl-NL, nl-BE ; nl-Latn ; nl-Latn
|
||||
nl, nl-Latn, nl-NL, nl-BE ; nl-NL ; nl-NL
|
||||
nl, nl-Latn, nl-NL, nl-BE ; nl-Latn-NL ; nl-NL
|
||||
|
||||
# region distance Portuguese
|
||||
pt, pt-PT ; pt-ES ; pt-PT
|
||||
|
||||
# if no preferred locale specified, pick top language, not regional
|
||||
en, fr, fr-CA, fr-CH ; fr-US ; fr #TODO: ; fr-u-rg-US
|
||||
|
||||
# region distance German
|
||||
de-AT, de-DE, de-CH ; de ; de-DE
|
||||
|
||||
# en-AU is closer to en-GB than to en (which is en-US)
|
||||
en, en-GB, es-ES, es-419 ; en-AU ; en-GB
|
||||
en, en-GB, es-ES, es-419 ; es-MX ; es-419 ; es-MX
|
||||
en, en-GB, es-ES, es-419 ; es-PT ; es-ES
|
||||
|
||||
# undefined
|
||||
it, fr ; und ; it
|
||||
|
||||
# und does not match en
|
||||
it, en ; und ; it
|
||||
|
||||
# undefined in priority list
|
||||
it, und ; und ; und
|
||||
it, und ; en ; it
|
||||
|
||||
# undefined
|
||||
it, fr, zh ; und-FR ; fr
|
||||
it, fr, zh ; und-CN ; zh
|
||||
it, fr, zh ; und-Hans ; zh
|
||||
it, fr, zh ; und-Hant ; zh
|
||||
it, fr, zh ; und-Latn ; it
|
||||
|
||||
# match on maximized tag
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja-JP, en-GB ; ja
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja-Jpan-JP, en-GB ; ja
|
||||
|
||||
# pick best maximized tag
|
||||
ja, ja-Jpan-US, ja-JP, en, ru ; ja-Jpan, ru ; ja
|
||||
ja, ja-Jpan-US, ja-JP, en, ru ; ja-JP, ru ; ja-JP
|
||||
ja, ja-Jpan-US, ja-JP, en, ru ; ja-US, ru ; ja-Jpan-US
|
||||
|
||||
# termination: pick best maximized match
|
||||
ja, ja-Jpan, ja-JP, en, ru ; ja-Jpan-JP, ru ; ja-JP
|
||||
ja, ja-Jpan, ja-JP, en, ru ; ja-Jpan, ru ; ja-Jpan
|
||||
|
||||
# same language over exact, but distinguish when user is explicit
|
||||
fr, en-GB, ja, es-ES, es-MX ; ja, de ; ja
|
||||
en, de, fr, ja ; de-CH, fr ; de # TODO: ; de-u-rg-CH
|
||||
en-GB, nl ; en, nl ; en-GB
|
||||
en-GB, nl ; en, nl, en-GB ; nl
|
||||
|
||||
# parent relation preserved
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-150 ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-AU ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-BE ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-GG ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-GI ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-HK ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-IE ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-IM ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-IN ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-JE ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-MT ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-NZ ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-PK ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-SG ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-DE ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; en-MT ; en-GB
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-AR ; es-419 ; es-AR
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-BO ; es-419 ; es-BO
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-CL ; es-419 ; es-CL
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-CO ; es-419 ; es-CO
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-CR ; es-419 ; es-CR
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-CU ; es-419 ; es-CU
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-DO ; es-419 ; es-DO
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-EC ; es-419 ; es-EC
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-GT ; es-419 ; es-GT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-HN ; es-419 ; es-HN
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-MX ; es-419 ; es-MX
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-NI ; es-419 ; es-NI
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-PA ; es-419 ; es-PA
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-PE ; es-419 ; es-PE
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-PR ; es-419 ; es-PR
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-PT ; es
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-PY ; es-419 ; es-PY
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-SV ; es-419 ; es-SV
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-US ; es-419
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-UY ; es-419 ; es-UY
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; es-VE ; es-419 ; es-VE
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-AO ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-CV ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-GW ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-MO ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-MZ ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-ST ; pt-PT
|
||||
en, en-US, en-GB, es, es-419, pt, pt-BR, pt-PT, zh, zh-Hant, zh-Hant-HK ; pt-TL ; pt-PT
|
||||
|
||||
# preserve extensions
|
||||
en, de, sl-nedis ; de-FR-u-co-phonebk ; de ; de-u-co-phonebk
|
||||
en, de, sl-nedis ; sl-nedis-u-cu-eur ; sl-nedis ; sl-nedis-u-cu-eur
|
||||
en, de, sl-nedis ; sl-u-cu-eur ; sl-nedis ; sl-nedis-u-cu-eur
|
||||
en, de, sl-nedis ; sl-HR-nedis-u-cu-eur ; sl-nedis ; sl-nedis-u-cu-eur
|
||||
en, de, sl-nedis ; de-t-m0-iso-i0-pinyin ; de ; de-t-m0-iso-i0-pinyin
|
||||
|
Reference in New Issue
Block a user