testing: replace legacy gopkg.in/check.v1 (#4185)

Milos Gajdos, 2023-12-13 21:07:58 +00:00, committed by GitHub (GPG key ID: 4AEE18F83AFDEB23)
commit b8fb08e0a1
93 changed files with 24609 additions and 4328 deletions
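
The conversion follows one pattern throughout the files below: the gocheck wiring (a package-level Test hook calling check.TestingT plus check.Suite registration) becomes either plain testing functions or a testify suite started from an ordinary test function. A minimal sketch of the new suite shape, using placeholder names rather than any file touched by this commit:

package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// ExampleSuite groups related tests; embedding suite.Suite supplies the
// assertion helpers and access to the underlying *testing.T.
type ExampleSuite struct {
	suite.Suite
}

// SetupTest runs before every test method (testify's spelling of
// gocheck's SetUpTest).
func (s *ExampleSuite) SetupTest() {}

// TestSomething is picked up by suite.Run via reflection: every method
// whose name starts with "Test" becomes a subtest.
func (s *ExampleSuite) TestSomething() {
	s.Require().Equal(4, 2+2)
}

// TestExampleSuite is the only hook "go test" needs; it replaces both
// func Test(t *testing.T) { check.TestingT(t) } and check.Suite(...).
func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}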


@@ -3,19 +3,15 @@ package configuration
 import (
 	"bytes"
 	"net/http"
-	"os"
 	"reflect"
 	"strings"
 	"testing"
 	"time"

-	"gopkg.in/check.v1"
+	"github.com/stretchr/testify/suite"
 	"gopkg.in/yaml.v2"
 )

-// Hook up gocheck into the "go test" runner
-func Test(t *testing.T) { check.TestingT(t) }
-
 // configStruct is a canonical example configuration, which should map to configYamlV0_1
 var configStruct = Configuration{
 	Version: "0.1",
@@ -147,7 +143,7 @@ var configStruct = Configuration{
 }

 // configYamlV0_1 is a Version 0.1 yaml document representing configStruct
-var configYamlV0_1 = `
+const configYamlV0_1 = `
 version: 0.1
 log:
   level: info
@@ -201,7 +197,7 @@ redis:

 // inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory
 // storage driver with no parameters
-var inmemoryConfigYamlV0_1 = `
+const inmemoryConfigYamlV0_1 = `
 version: 0.1
 log:
   level: info
@@ -229,54 +225,56 @@ http:
 `

 type ConfigSuite struct {
+	suite.Suite
 	expectedConfig *Configuration
 }

-var _ = check.Suite(new(ConfigSuite))
+func TestConfigSuite(t *testing.T) {
+	suite.Run(t, new(ConfigSuite))
+}

-func (suite *ConfigSuite) SetUpTest(c *check.C) {
-	os.Clearenv()
+func (suite *ConfigSuite) SetupTest() {
 	suite.expectedConfig = copyConfig(configStruct)
 }

 // TestMarshalRoundtrip validates that configStruct can be marshaled and
 // unmarshaled without changing any parameters
-func (suite *ConfigSuite) TestMarshalRoundtrip(c *check.C) {
+func (suite *ConfigSuite) TestMarshalRoundtrip() {
 	configBytes, err := yaml.Marshal(suite.expectedConfig)
-	c.Assert(err, check.IsNil)
+	suite.Require().NoError(err)
 	config, err := Parse(bytes.NewReader(configBytes))
-	c.Log(string(configBytes))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.T().Log(string(configBytes))
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseSimple validates that configYamlV0_1 can be parsed into a struct
 // matching configStruct
-func (suite *ConfigSuite) TestParseSimple(c *check.C) {
+func (suite *ConfigSuite) TestParseSimple() {
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseInmemory validates that configuration yaml with storage provided as
 // a string can be parsed into a Configuration struct with no storage parameters
-func (suite *ConfigSuite) TestParseInmemory(c *check.C) {
+func (suite *ConfigSuite) TestParseInmemory() {
 	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
 	suite.expectedConfig.Log.Fields = nil
 	suite.expectedConfig.Redis = Redis{}
 	config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseIncomplete validates that an incomplete yaml configuration cannot
 // be parsed without providing environment variables to fill in the missing
 // components.
-func (suite *ConfigSuite) TestParseIncomplete(c *check.C) {
+func (suite *ConfigSuite) TestParseIncomplete() {
 	incompleteConfigYaml := "version: 0.1"
 	_, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 	suite.expectedConfig.Log.Fields = nil
 	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}}
@@ -287,135 +285,134 @@ func (suite *ConfigSuite) TestParseIncomplete(c *check.C) {
 	// Note: this also tests that REGISTRY_STORAGE and
 	// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together
-	os.Setenv("REGISTRY_STORAGE", "filesystem")
-	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
-	os.Setenv("REGISTRY_AUTH", "silly")
-	os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")
+	suite.T().Setenv("REGISTRY_STORAGE", "filesystem")
+	suite.T().Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
+	suite.T().Setenv("REGISTRY_AUTH", "silly")
+	suite.T().Setenv("REGISTRY_AUTH_SILLY_REALM", "silly")
 	config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithSameEnvStorage validates that providing environment variables
 // that match the given storage type will only include environment-defined
 // parameters and remove yaml-defined parameters
-func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *check.C) {
+func (suite *ConfigSuite) TestParseWithSameEnvStorage() {
 	suite.expectedConfig.Storage = Storage{"somedriver": Parameters{"region": "us-east-1"}}
-	os.Setenv("REGISTRY_STORAGE", "somedriver")
-	os.Setenv("REGISTRY_STORAGE_SOMEDRIVER_REGION", "us-east-1")
+	suite.T().Setenv("REGISTRY_STORAGE", "somedriver")
+	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_REGION", "us-east-1")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithDifferentEnvStorageParams validates that providing environment variables that change
 // and add to the given storage parameters will change and add parameters to the parsed
 // Configuration struct
-func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *check.C) {
+func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams() {
 	suite.expectedConfig.Storage.setParameter("string1", "us-west-1")
 	suite.expectedConfig.Storage.setParameter("bool1", true)
 	suite.expectedConfig.Storage.setParameter("newparam", "some Value")
-	os.Setenv("REGISTRY_STORAGE_SOMEDRIVER_STRING1", "us-west-1")
-	os.Setenv("REGISTRY_STORAGE_SOMEDRIVER_BOOL1", "true")
-	os.Setenv("REGISTRY_STORAGE_SOMEDRIVER_NEWPARAM", "some Value")
+	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_STRING1", "us-west-1")
+	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_BOOL1", "true")
+	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER_NEWPARAM", "some Value")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithDifferentEnvStorageType validates that providing an environment variable that
 // changes the storage type will be reflected in the parsed Configuration struct
-func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *check.C) {
+func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType() {
 	suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}}
-	os.Setenv("REGISTRY_STORAGE", "inmemory")
+	suite.T().Setenv("REGISTRY_STORAGE", "inmemory")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable
 // that changes the storage type will be reflected in the parsed Configuration struct and that
 // environment storage parameters will also be included
-func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *check.C) {
+func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams() {
 	suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}}
 	suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot")
-	os.Setenv("REGISTRY_STORAGE", "filesystem")
-	os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
+	suite.T().Setenv("REGISTRY_STORAGE", "filesystem")
+	suite.T().Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log
 // level to the same as the one provided in the yaml will not change the parsed Configuration struct
-func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *check.C) {
-	os.Setenv("REGISTRY_LOGLEVEL", "info")
+func (suite *ConfigSuite) TestParseWithSameEnvLoglevel() {
+	suite.T().Setenv("REGISTRY_LOGLEVEL", "info")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the
 // log level will override the value provided in the yaml document
-func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *check.C) {
+func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel() {
 	suite.expectedConfig.Log.Level = "error"
-	os.Setenv("REGISTRY_LOG_LEVEL", "error")
+	suite.T().Setenv("REGISTRY_LOG_LEVEL", "error")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseInvalidLoglevel validates that the parser will fail to parse a
 // configuration if the loglevel is malformed
-func (suite *ConfigSuite) TestParseInvalidLoglevel(c *check.C) {
+func (suite *ConfigSuite) TestParseInvalidLoglevel() {
 	invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory"
 	_, err := Parse(bytes.NewReader([]byte(invalidConfigYaml)))
-	c.Assert(err, check.NotNil)
-	os.Setenv("REGISTRY_LOGLEVEL", "derp")
+	suite.Require().Error(err)
+	suite.T().Setenv("REGISTRY_LOGLEVEL", "derp")
 	_, err = Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 }

 // TestParseInvalidVersion validates that the parser will fail to parse a newer configuration
 // version than the CurrentVersion
-func (suite *ConfigSuite) TestParseInvalidVersion(c *check.C) {
+func (suite *ConfigSuite) TestParseInvalidVersion() {
 	suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1)
 	configBytes, err := yaml.Marshal(suite.expectedConfig)
-	c.Assert(err, check.IsNil)
+	suite.Require().NoError(err)
 	_, err = Parse(bytes.NewReader(configBytes))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 }

 // TestParseExtraneousVars validates that environment variables referring to
 // nonexistent variables don't cause side effects.
-func (suite *ConfigSuite) TestParseExtraneousVars(c *check.C) {
+func (suite *ConfigSuite) TestParseExtraneousVars() {
 	// Environment variables which shouldn't set config items
-	os.Setenv("REGISTRY_DUCKS", "quack")
-	os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk")
+	suite.T().Setenv("REGISTRY_DUCKS", "quack")
+	suite.T().Setenv("REGISTRY_REPORTING_ASDF", "ghjk")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseEnvVarImplicitMaps validates that environment variables can set
 // values in maps that don't already exist.
-func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *check.C) {
+func (suite *ConfigSuite) TestParseEnvVarImplicitMaps() {
 	readonly := make(map[string]interface{})
 	readonly["enabled"] = true
@@ -424,61 +421,63 @@ func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *check.C) {
 	suite.expectedConfig.Storage["maintenance"] = maintenance
-	os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")
+	suite.T().Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true")
 	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, suite.expectedConfig)
+	suite.Require().NoError(err)
+	suite.Require().Equal(suite.expectedConfig, config)
 }

 // TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a
 // string over existing map fails.
-func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *check.C) {
-	os.Setenv("REGISTRY_STORAGE_SOMEDRIVER", "somestring")
+func (suite *ConfigSuite) TestParseEnvWrongTypeMap() {
+	suite.T().Setenv("REGISTRY_STORAGE_SOMEDRIVER", "somestring")
 	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 }

 // TestParseEnvWrongTypeStruct validates that incorrectly attempting to
 // unmarshal a string into a struct fails.
-func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *check.C) {
-	os.Setenv("REGISTRY_STORAGE_LOG", "somestring")
+func (suite *ConfigSuite) TestParseEnvWrongTypeStruct() {
+	suite.T().Setenv("REGISTRY_STORAGE_LOG", "somestring")
 	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 }

 // TestParseEnvWrongTypeSlice validates that incorrectly attempting to
 // unmarshal a string into a slice fails.
-func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *check.C) {
-	os.Setenv("REGISTRY_LOG_HOOKS", "somestring")
+func (suite *ConfigSuite) TestParseEnvWrongTypeSlice() {
+	suite.T().Setenv("REGISTRY_LOG_HOOKS", "somestring")
 	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.NotNil)
+	suite.Require().Error(err)
 }

 // TestParseEnvMany tests several environment variable overrides.
 // The result is not checked - the goal of this test is to detect panics
 // from misuse of reflection.
-func (suite *ConfigSuite) TestParseEnvMany(c *check.C) {
-	os.Setenv("REGISTRY_VERSION", "0.1")
-	os.Setenv("REGISTRY_LOG_LEVEL", "debug")
-	os.Setenv("REGISTRY_LOG_FORMATTER", "json")
-	os.Setenv("REGISTRY_LOG_HOOKS", "json")
-	os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz")
-	os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf")
-	os.Setenv("REGISTRY_LOGLEVEL", "debug")
-	os.Setenv("REGISTRY_STORAGE", "somedriver")
-	os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1")
-	os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
-	os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
+func (suite *ConfigSuite) TestParseEnvMany() {
+	suite.T().Setenv("REGISTRY_VERSION", "0.1")
+	suite.T().Setenv("REGISTRY_LOG_LEVEL", "debug")
+	suite.T().Setenv("REGISTRY_LOG_FORMATTER", "json")
+	suite.T().Setenv("REGISTRY_LOG_HOOKS", "json")
+	suite.T().Setenv("REGISTRY_LOG_FIELDS", "abc: xyz")
+	suite.T().Setenv("REGISTRY_LOG_HOOKS", "- type: asdf")
+	suite.T().Setenv("REGISTRY_LOGLEVEL", "debug")
+	suite.T().Setenv("REGISTRY_STORAGE", "somedriver")
+	suite.T().Setenv("REGISTRY_AUTH_PARAMS", "param1: value1")
+	suite.T().Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
+	suite.T().Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2")
 	_, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))
-	c.Assert(err, check.IsNil)
+	suite.Require().NoError(err)
 }

-func checkStructs(c *check.C, t reflect.Type, structsChecked map[string]struct{}) {
+func checkStructs(tt *testing.T, t reflect.Type, structsChecked map[string]struct{}) {
+	tt.Helper()
 	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice {
 		t = t.Elem()
 	}
@@ -500,23 +499,23 @@ func checkStructs(c *check.C, t reflect.Type, structsChecked map[string]struct{}) {
 		// Check that the yaml tag does not contain an _.
 		yamlTag := sf.Tag.Get("yaml")
 		if strings.Contains(yamlTag, "_") {
-			c.Fatalf("yaml field name includes _ character: %s", yamlTag)
+			tt.Fatalf("yaml field name includes _ character: %s", yamlTag)
 		}
 		upper := strings.ToUpper(sf.Name)
 		if _, present := byUpperCase[upper]; present {
-			c.Fatalf("field name collision in configuration object: %s", sf.Name)
+			tt.Fatalf("field name collision in configuration object: %s", sf.Name)
 		}
 		byUpperCase[upper] = i

-		checkStructs(c, sf.Type, structsChecked)
+		checkStructs(tt, sf.Type, structsChecked)
 	}
 }

 // TestValidateConfigStruct makes sure that the config struct has no members
 // with yaml tags that would be ambiguous to the environment variable parser.
-func (suite *ConfigSuite) TestValidateConfigStruct(c *check.C) {
+func (suite *ConfigSuite) TestValidateConfigStruct() {
 	structsChecked := make(map[string]struct{})
-	checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked)
+	checkStructs(suite.T(), reflect.TypeOf(Configuration{}), structsChecked)
 }

 func copyConfig(config Configuration) *Configuration {
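
Two easy-to-miss details in the converted suite above: testify discovers SetupTest (not gocheck's SetUpTest spelling), and suite.Require() mirrors gocheck's c.Assert by aborting the test method on the first failure, while plain suite assertions record the failure and keep running. A small illustrative method, not part of the commit, assuming the same package and fixtures:

func (suite *ConfigSuite) TestRequireVersusAssert() {
	config, err := Parse(bytes.NewReader([]byte(configYamlV0_1)))

	// Require() fails fast, like c.Assert did, so config is safe to use below.
	suite.Require().NoError(err)

	// Non-Require assertions mark the test as failed but continue executing.
	suite.Equal("0.1", string(config.Version))
}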


@@ -1,10 +1,10 @@
 package configuration

 import (
-	"os"
 	"reflect"
+	"testing"

-	"gopkg.in/check.v1"
+	"github.com/stretchr/testify/require"
 )

 type localConfiguration struct {
@@ -41,15 +41,10 @@ notifications:
 - name: "bar"
 - name: "car"`

-type ParserSuite struct{}
-
-var _ = check.Suite(new(ParserSuite))
-
-func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner(c *check.C) {
+func TestParserOverwriteIninitializedPoiner(t *testing.T) {
 	config := localConfiguration{}

-	os.Setenv("REGISTRY_LOG_FORMATTER", "json")
-	defer os.Unsetenv("REGISTRY_LOG_FORMATTER")
+	t.Setenv("REGISTRY_LOG_FORMATTER", "json")

 	p := NewParser("registry", []VersionedParseInfo{
 		{
@@ -62,8 +57,8 @@ func (suite *ParserSuite) TestParserOverwriteIninitializedPoiner(c *check.C) {
 	})

 	err := p.Parse([]byte(testConfig), &config)
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, expectedConfig)
+	require.NoError(t, err)
+	require.Equal(t, expectedConfig, config)
 }

 const testConfig2 = `version: "0.1"
@@ -74,18 +69,15 @@ notifications:
 - name: "val2"
 - name: "car"`

-func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner(c *check.C) {
+func TestParseOverwriteUnininitializedPoiner(t *testing.T) {
 	config := localConfiguration{}

-	os.Setenv("REGISTRY_LOG_FORMATTER", "json")
-	defer os.Unsetenv("REGISTRY_LOG_FORMATTER")
+	t.Setenv("REGISTRY_LOG_FORMATTER", "json")

 	// override only first two notificationsvalues
 	// in the tetConfig: leave the last value unchanged.
-	os.Setenv("REGISTRY_NOTIFICATIONS_0_NAME", "foo")
-	defer os.Unsetenv("REGISTRY_NOTIFICATIONS_0_NAME")
-	os.Setenv("REGISTRY_NOTIFICATIONS_1_NAME", "bar")
-	defer os.Unsetenv("REGISTRY_NOTIFICATIONS_1_NAME")
+	t.Setenv("REGISTRY_NOTIFICATIONS_0_NAME", "foo")
+	t.Setenv("REGISTRY_NOTIFICATIONS_1_NAME", "bar")

 	p := NewParser("registry", []VersionedParseInfo{
 		{
@@ -98,6 +90,6 @@ func (suite *ParserSuite) TestParseOverwriteUnininitializedPoiner(c *check.C) {
 	})

 	err := p.Parse([]byte(testConfig2), &config)
-	c.Assert(err, check.IsNil)
-	c.Assert(config, check.DeepEquals, expectedConfig)
+	require.NoError(t, err)
+	require.Equal(t, expectedConfig, config)
 }
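
The parser tests drop the os.Setenv / defer os.Unsetenv pairs in favour of (*testing.T).Setenv, which restores the previous value automatically via t.Cleanup and refuses to run in tests marked t.Parallel(). A stand-alone sketch of the idiom, assuming the usual os and testing imports; the test name is illustrative:

func TestEnvOverride(t *testing.T) {
	// Set for the duration of this test only; the previous value (or its
	// absence) is restored automatically when the test finishes.
	t.Setenv("REGISTRY_LOG_FORMATTER", "json")

	if got := os.Getenv("REGISTRY_LOG_FORMATTER"); got != "json" {
		t.Fatalf("expected formatter json, got %q", got)
	}
}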

go.mod (45 changed lines)

@@ -27,36 +27,15 @@ require (
 	github.com/redis/go-redis/v9 v9.1.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
+	github.com/stretchr/testify v1.8.4
 	go.opentelemetry.io/contrib/exporters/autoexport v0.46.1
 	go.opentelemetry.io/otel/sdk v1.21.0
 	golang.org/x/crypto v0.15.0
 	golang.org/x/oauth2 v0.11.0
 	google.golang.org/api v0.126.0
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 )

-require (
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/google/s2a-go v0.1.4 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
-	github.com/rogpeppe/go-internal v1.11.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
-	golang.org/x/sync v0.3.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
-)
-
 require (
 	cloud.google.com/go v0.110.7 // indirect
 	cloud.google.com/go/compute v1.23.0 // indirect
@@ -65,8 +44,10 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
@@ -74,16 +55,17 @@ require (
 	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/s2a-go v0.1.4 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
 	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
-	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_golang v1.17.0 // indirect; updated to latest
 	github.com/prometheus/client_model v0.5.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
@@ -91,15 +73,30 @@ require (
 	github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
 	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect
 	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
 	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	golang.org/x/net v0.18.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.14.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

go.sum (5 changed lines)

@@ -162,7 +162,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -185,7 +184,6 @@ github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrB
 github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -216,9 +214,7 @@ github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDO
 github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY=
 github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -241,6 +237,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=


@@ -2,7 +2,6 @@ package azure
 import (
 	"context"
-	"fmt"
 	"math/rand"
 	"os"
 	"strings"
@@ -10,7 +9,6 @@ import (
 	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
 	"github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
-	"gopkg.in/check.v1"
 )

 const (
@@ -22,10 +20,7 @@ const (
 )

 var azureDriverConstructor func() (storagedriver.StorageDriver, error)
-var skipCheck func() string
-
-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
+var skipCheck func(tb testing.TB)

 func init() {
 	var (
@@ -72,14 +67,23 @@
 	}

 	// Skip Azure storage driver tests if environment variable parameters are not provided
-	skipCheck = func() string {
-		if len(missing) > 0 {
-			return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", "))
-		}
-		return ""
-	}
+	skipCheck = func(tb testing.TB) {
+		tb.Helper()

-	testsuites.RegisterSuite(azureDriverConstructor, skipCheck)
+		if len(missing) > 0 {
+			tb.Skipf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", "))
+		}
+	}
+}
+
+func TestAzureDriverSuite(t *testing.T) {
+	skipCheck(t)
+	testsuites.Driver(t, azureDriverConstructor)
+}
+
+func BenchmarkAzureDriverSuite(b *testing.B) {
+	skipCheck(b)
+	testsuites.BenchDriver(b, azureDriverConstructor)
 }

 var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
@@ -93,9 +97,7 @@ func randStringRunes(n int) string {
 }

 func TestCommitAfterMove(t *testing.T) {
-	if skipCheck() != "" {
-		t.Skip(skipCheck())
-	}
+	skipCheck(t)

 	driver, err := azureDriverConstructor()
 	if err != nil {
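
The storage driver tests (Azure above, and GCS and S3 further down) all move their skip logic into a helper that takes testing.TB, the interface satisfied by both *testing.T and *testing.B, so a single function can gate the Test...Suite and Benchmark...Suite entry points alike. A generic sketch of that shape; the function and environment variable names here are made up for illustration:

// skipUnlessConfigured skips the calling test or benchmark when required
// configuration is missing. tb.Helper() makes the skip message point at
// the caller rather than at this function.
func skipUnlessConfigured(tb testing.TB) {
	tb.Helper()

	if os.Getenv("EXAMPLE_DRIVER_BUCKET") == "" { // illustrative variable name
		tb.Skip("EXAMPLE_DRIVER_BUCKET must be set to run these tests")
	}
}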


@@ -1,35 +1,29 @@
 package filesystem

 import (
-	"os"
 	"reflect"
 	"testing"

 	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
 	"github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
-	"gopkg.in/check.v1"
 )

-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-func init() {
-	root, err := os.MkdirTemp("", "driver-")
-	if err != nil {
-		panic(err)
-	}
-	defer os.Remove(root)
-
-	drvr, err := FromParameters(map[string]interface{}{
-		"rootdirectory": root,
-	})
-	if err != nil {
-		panic(err)
-	}
-
-	testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
-		return drvr, nil
-	}, testsuites.NeverSkip)
+func newDriverConstructor(tb testing.TB) testsuites.DriverConstructor {
+	root := tb.TempDir()
+	return func() (storagedriver.StorageDriver, error) {
+		return FromParameters(map[string]interface{}{
+			"rootdirectory": root,
+		})
+	}
+}
+
+func TestFilesystemDriverSuite(t *testing.T) {
+	testsuites.Driver(t, newDriverConstructor(t))
+}
+
+func BenchmarkFilesystemDriverSuite(b *testing.B) {
+	testsuites.BenchDriver(b, newDriverConstructor(b))
 }

 func TestFromParametersImpl(t *testing.T) {
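
Instead of building a single driver in init against an os.MkdirTemp directory, the suites now derive a constructor per run: tb.TempDir() gives each test or benchmark an isolated root that the testing package deletes automatically, and testsuites.Driver / testsuites.BenchDriver call the constructor to build fresh driver instances. A trimmed-down sketch of the constructor pattern with a stand-in driver type (not the real storagedriver interface):

// fakeDriver stands in for a real storagedriver.StorageDriver.
type fakeDriver struct{ root string }

// newFakeConstructor returns a factory bound to a per-run temporary
// directory; the directory is removed automatically when the test or
// benchmark that created it finishes.
func newFakeConstructor(tb testing.TB) func() (*fakeDriver, error) {
	root := tb.TempDir()
	return func() (*fakeDriver, error) {
		return &fakeDriver{root: root}, nil
	}
}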


@@ -14,15 +14,11 @@ import (
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/option"
-	"gopkg.in/check.v1"
 )

-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
 var (
 	gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error)
-	skipGCS              func() string
+	skipCheck            func(tb testing.TB)
 )

 func init() {
@@ -30,15 +26,12 @@ func init() {
 	credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")

 	// Skip GCS storage driver tests if environment variable parameters are not provided
-	skipGCS = func() string {
-		if bucket == "" || credentials == "" {
-			return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS"
-		}
-		return ""
-	}
+	skipCheck = func(tb testing.TB) {
+		tb.Helper()

-	if skipGCS() != "" {
-		return
+		if bucket == "" || credentials == "" {
+			tb.Skip("The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS")
+		}
 	}

 	jsonKey, err := os.ReadFile(credentials)
@@ -46,11 +39,6 @@ func init() {
 		panic(fmt.Sprintf("Error reading JSON key : %v", err))
 	}

-	root, err := os.MkdirTemp("", "driver-")
-	if err != nil {
-		panic(err)
-	}
-	defer os.Remove(root)
-
 	var ts oauth2.TokenSource
 	var email string
 	var privateKey []byte
@@ -82,7 +70,7 @@ func init() {
 	gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) {
 		parameters := driverParameters{
 			bucket:        bucket,
-			rootDirectory: root,
+			rootDirectory: rootDirectory,
 			email:         email,
 			privateKey:    privateKey,
 			client:        oauth2.NewClient(dcontext.Background(), ts),
@@ -93,17 +81,29 @@ func init() {
 		return New(context.Background(), parameters)
 	}
+}

-	testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
+func newDriverConstructor(tb testing.TB) testsuites.DriverConstructor {
+	root := tb.TempDir()
+	return func() (storagedriver.StorageDriver, error) {
 		return gcsDriverConstructor(root)
-	}, skipGCS)
+	}
+}
+
+func TestGCSDriverSuite(t *testing.T) {
+	skipCheck(t)
+	testsuites.Driver(t, newDriverConstructor(t))
+}
+
+func BenchmarkGCSDriverSuite(b *testing.B) {
+	skipCheck(b)
+	testsuites.BenchDriver(b, newDriverConstructor(b))
 }

 // Test Committing a FileWriter without having called Write
 func TestCommitEmpty(t *testing.T) {
-	if skipGCS() != "" {
-		t.Skip(skipGCS())
-	}
+	skipCheck(t)

 	validRoot := t.TempDir()
@@ -144,9 +144,7 @@ func TestCommitEmpty(t *testing.T) {
 // Test Committing a FileWriter after having written exactly
 // defaultChunksize bytes.
 func TestCommit(t *testing.T) {
-	if skipGCS() != "" {
-		t.Skip(skipGCS())
-	}
+	skipCheck(t)

 	validRoot := t.TempDir()
@@ -190,9 +188,7 @@ func TestCommit(t *testing.T) {
 }

 func TestRetry(t *testing.T) {
-	if skipGCS() != "" {
-		t.Skip(skipGCS())
-	}
+	skipCheck(t)

 	assertError := func(expected string, observed error) {
 		observedMsg := "<nil>"
@@ -227,9 +223,7 @@ func TestRetry(t *testing.T) {
 }

 func TestEmptyRootList(t *testing.T) {
-	if skipGCS() != "" {
-		t.Skip(skipGCS())
-	}
+	skipCheck(t)

 	validRoot := t.TempDir()
@@ -284,9 +278,7 @@ func TestEmptyRootList(t *testing.T) {

 // TestMoveDirectory checks that moving a directory returns an error.
 func TestMoveDirectory(t *testing.T) {
-	if skipGCS() != "" {
-		t.Skip(skipGCS())
-	}
+	skipCheck(t)

 	validRoot := t.TempDir()


@@ -5,15 +5,16 @@ import (
 	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
 	"github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
-	"gopkg.in/check.v1"
 )

-// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) }
-
-func init() {
-	inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) {
-		return New(), nil
-	}
-	testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip)
+func newDriverConstructor() (storagedriver.StorageDriver, error) {
+	return New(), nil
+}
+
+func TestInMemoryDriverSuite(t *testing.T) {
+	testsuites.Driver(t, newDriverConstructor)
+}
+
+func BenchmarkInMemoryDriverSuite(b *testing.B) {
+	testsuites.BenchDriver(b, newDriverConstructor)
 }


@@ -5,19 +5,13 @@ import (
 	"os"
 	"testing"

-	"gopkg.in/check.v1"
+	"github.com/stretchr/testify/require"
 )

-func Test(t *testing.T) { check.TestingT(t) }
-
-type MiddlewareSuite struct{}
-
-var _ = check.Suite(&MiddlewareSuite{})
-
-func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
+func TestNoConfig(t *testing.T) {
 	options := make(map[string]interface{})
 	_, err := newCloudFrontStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.ErrorMatches, "no baseurl provided")
+	require.ErrorContains(t, err, "no baseurl provided")
 }

 func TestCloudFrontStorageMiddlewareGenerateKey(t *testing.T) {


@@ -4,99 +4,93 @@ import (
 	"context"
 	"testing"

-	"gopkg.in/check.v1"
+	"github.com/stretchr/testify/require"
 )

-func Test(t *testing.T) { check.TestingT(t) }
-
-type MiddlewareSuite struct{}
-
-var _ = check.Suite(&MiddlewareSuite{})
-
-func (s *MiddlewareSuite) TestNoConfig(c *check.C) {
+func TestNoConfig(t *testing.T) {
 	options := make(map[string]interface{})
 	_, err := newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.ErrorMatches, "no baseurl provided")
+	require.ErrorContains(t, err, "no baseurl provided")
 }

-func (s *MiddlewareSuite) TestMissingScheme(c *check.C) {
+func TestMissingScheme(t *testing.T) {
 	options := make(map[string]interface{})
 	options["baseurl"] = "example.com"
 	_, err := newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.ErrorMatches, "no scheme specified for redirect baseurl")
+	require.ErrorContains(t, err, "no scheme specified for redirect baseurl")
 }

-func (s *MiddlewareSuite) TestHttpsPort(c *check.C) {
+func TestHttpsPort(t *testing.T) {
 	options := make(map[string]interface{})
 	options["baseurl"] = "https://example.com:5443"
 	middleware, err := newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.Equals, nil)
+	require.NoError(t, err)

 	m, ok := middleware.(*redirectStorageMiddleware)
-	c.Assert(ok, check.Equals, true)
-	c.Assert(m.scheme, check.Equals, "https")
-	c.Assert(m.host, check.Equals, "example.com:5443")
+	require.True(t, ok)
+	require.Equal(t, "https", m.scheme)
+	require.Equal(t, "example.com:5443", m.host)

 	url, err := middleware.RedirectURL(nil, "/rick/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "https://example.com:5443/rick/data")
+	require.NoError(t, err)
+	require.Equal(t, "https://example.com:5443/rick/data", url)
 }

-func (s *MiddlewareSuite) TestHTTP(c *check.C) {
+func TestHTTP(t *testing.T) {
 	options := make(map[string]interface{})
 	options["baseurl"] = "http://example.com"
 	middleware, err := newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.Equals, nil)
+	require.NoError(t, err)

 	m, ok := middleware.(*redirectStorageMiddleware)
-	c.Assert(ok, check.Equals, true)
-	c.Assert(m.scheme, check.Equals, "http")
-	c.Assert(m.host, check.Equals, "example.com")
+	require.True(t, ok)
+	require.Equal(t, "http", m.scheme)
+	require.Equal(t, "example.com", m.host)

 	url, err := middleware.RedirectURL(nil, "morty/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "http://example.com/morty/data")
+	require.NoError(t, err)
+	require.Equal(t, "http://example.com/morty/data", url)
 }

-func (s *MiddlewareSuite) TestPath(c *check.C) {
+func TestPath(t *testing.T) {
 	// basePath: end with no slash
 	options := make(map[string]interface{})
 	options["baseurl"] = "https://example.com/path"
 	middleware, err := newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.Equals, nil)
+	require.NoError(t, err)

 	m, ok := middleware.(*redirectStorageMiddleware)
-	c.Assert(ok, check.Equals, true)
-	c.Assert(m.scheme, check.Equals, "https")
-	c.Assert(m.host, check.Equals, "example.com")
-	c.Assert(m.basePath, check.Equals, "/path")
+	require.True(t, ok)
+	require.Equal(t, "https", m.scheme)
+	require.Equal(t, "example.com", m.host)
+	require.Equal(t, "/path", m.basePath)

 	// call RedirectURL() with no leading slash
 	url, err := middleware.RedirectURL(nil, "morty/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "https://example.com/path/morty/data")
+	require.NoError(t, err)
+	require.Equal(t, "https://example.com/path/morty/data", url)
 	// call RedirectURL() with leading slash
 	url, err = middleware.RedirectURL(nil, "/morty/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "https://example.com/path/morty/data")
+	require.NoError(t, err)
+	require.Equal(t, "https://example.com/path/morty/data", url)

 	// basePath: end with slash
 	options["baseurl"] = "https://example.com/path/"
 	middleware, err = newRedirectStorageMiddleware(context.Background(), nil, options)
-	c.Assert(err, check.Equals, nil)
+	require.NoError(t, err)

 	m, ok = middleware.(*redirectStorageMiddleware)
-	c.Assert(ok, check.Equals, true)
-	c.Assert(m.scheme, check.Equals, "https")
-	c.Assert(m.host, check.Equals, "example.com")
-	c.Assert(m.basePath, check.Equals, "/path/")
+	require.True(t, ok)
+	require.Equal(t, "https", m.scheme)
+	require.Equal(t, "example.com", m.host)
+	require.Equal(t, "/path/", m.basePath)

 	// call RedirectURL() with no leading slash
 	url, err = middleware.RedirectURL(nil, "morty/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "https://example.com/path/morty/data")
+	require.NoError(t, err)
+	require.Equal(t, "https://example.com/path/morty/data", url)
 	// call RedirectURL() with leading slash
 	url, err = middleware.RedirectURL(nil, "/morty/data")
-	c.Assert(err, check.Equals, nil)
-	c.Assert(url, check.Equals, "https://example.com/path/morty/data")
+	require.NoError(t, err)
+	require.Equal(t, "https://example.com/path/morty/data", url)
 }
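
One semantic nuance of these middleware conversions: check.ErrorMatches interprets its argument as a regular expression that must match the whole error message, whereas require.ErrorContains only asks for a substring. For the literal messages asserted here the two are interchangeable, but a pattern-based check would need require.Regexp or an exact require.EqualError instead. A contrived sketch of the three options, assuming the usual errors, testing and require imports:

func TestErrorAssertionStyles(t *testing.T) {
	err := errors.New("no baseurl provided")

	// Substring match - what the converted tests rely on.
	require.ErrorContains(t, err, "no baseurl")

	// Exact message match.
	require.EqualError(t, err, "no baseurl provided")

	// Regular expression match, closest to check.ErrorMatches.
	require.Regexp(t, `^no baseurl .*$`, err.Error())
}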


@ -680,6 +680,10 @@ func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.Read
// Writer returns a FileWriter which will store the content written to it // Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit. // at the location designated by "path" after the call to Commit.
// It only allows appending to paths with zero size committed content,
// in which the existing content is overridden with the new content.
// It returns storagedriver.Error when appending to paths
// with non-zero committed content.
func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) { func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) {
key := d.s3Path(path) key := d.s3Path(path)
if !appendMode { if !appendMode {
@ -713,7 +717,30 @@ func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (stor
// if there were no more results to return after the first call, resp.IsTruncated would have been false // if there were no more results to return after the first call, resp.IsTruncated would have been false
// and the loop would be exited without recalling ListMultipartUploads // and the loop would be exited without recalling ListMultipartUploads
if len(resp.Uploads) == 0 { if len(resp.Uploads) == 0 {
break fi, err := d.Stat(ctx, path)
if err != nil {
return nil, parseError(path, err)
}
if fi.Size() == 0 {
resp, err := d.S3.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return nil, err
}
return d.newWriter(ctx, key, *resp.UploadId, nil), nil
}
return nil, storagedriver.Error{
DriverName: driverName,
Detail: fmt.Errorf("append to zero-size path %s unsupported", path),
}
} }
var allParts []*s3.Part var allParts []*s3.Part
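
Note on the append path above: with this change, resuming an upload through Writer(ctx, path, true) either starts a fresh multipart upload (zero-size committed content) or fails with a storagedriver.Error (non-zero committed content). A minimal caller-side sketch, not taken from this diff; it only assumes the storagedriver API that appears elsewhere in this change (StorageDriver.Writer, FileWriter, and storagedriver.Error returned by value):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)

// resumeUpload appends data to an in-progress upload path and distinguishes
// the driver's "append unsupported" error from ordinary transport errors.
func resumeUpload(ctx context.Context, d storagedriver.StorageDriver, path string, data []byte) error {
	w, err := d.Writer(ctx, path, true) // appendMode = true
	if err != nil {
		var derr storagedriver.Error
		if errors.As(err, &derr) {
			// The path already holds non-zero committed content; the S3 driver
			// now reports this instead of silently mishandling the append.
			return fmt.Errorf("cannot resume upload at %s: %w", path, err)
		}
		return err
	}
	_, err = w.Write(data) // Commit/Close handling omitted for brevity
	return err
}
```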


@@ -16,7 +16,6 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/distribution/distribution/v3/internal/dcontext" "github.com/distribution/distribution/v3/internal/dcontext"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver" storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/distribution/v3/registry/storage/driver/testsuites" "github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
@@ -24,7 +23,7 @@ import (
var ( var (
s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error) s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
skipS3 func() string skipCheck func(tb testing.TB)
) )
func init() { func init() {
@@ -48,12 +47,7 @@ func init() {
logLevel = os.Getenv("S3_LOGLEVEL") logLevel = os.Getenv("S3_LOGLEVEL")
) )
root, err := os.MkdirTemp("", "driver-") var err error
if err != nil {
panic(err)
}
defer os.Remove(root)
s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) { s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) {
encryptBool := false encryptBool := false
if encrypt != "" { if encrypt != "" {
@@ -146,22 +140,35 @@ func init() {
} }
// Skip S3 storage driver tests if environment variable parameters are not provided // Skip S3 storage driver tests if environment variable parameters are not provided
skipS3 = func() string { skipCheck = func(tb testing.TB) {
if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { tb.Helper()
return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
}
return ""
}
testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" {
tb.Skip("Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests")
}
}
}
func newDriverConstructor(tb testing.TB) testsuites.DriverConstructor {
root := tb.TempDir()
return func() (storagedriver.StorageDriver, error) {
return s3DriverConstructor(root, s3.StorageClassStandard) return s3DriverConstructor(root, s3.StorageClassStandard)
}, skipS3) }
}
func TestS3DriverSuite(t *testing.T) {
skipCheck(t)
testsuites.Driver(t, newDriverConstructor(t))
}
func BenchmarkS3DriverSuite(b *testing.B) {
skipCheck(b)
testsuites.BenchDriver(b, newDriverConstructor(b))
} }
func TestEmptyRootList(t *testing.T) { func TestEmptyRootList(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
validRoot := t.TempDir() validRoot := t.TempDir()
rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard) rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard)
@@ -205,9 +212,7 @@ func TestEmptyRootList(t *testing.T) {
} }
func TestStorageClass(t *testing.T) { func TestStorageClass(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
contents := []byte("contents") contents := []byte("contents")
@@ -269,9 +274,7 @@ func TestStorageClass(t *testing.T) {
} }
func TestDelete(t *testing.T) { func TestDelete(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
@@ -470,9 +473,7 @@ func TestDelete(t *testing.T) {
} }
func TestWalk(t *testing.T) { func TestWalk(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
@@ -711,9 +712,7 @@ func TestWalk(t *testing.T) {
} }
func TestOverThousandBlobs(t *testing.T) { func TestOverThousandBlobs(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
@@ -739,9 +738,7 @@ func TestOverThousandBlobs(t *testing.T) {
} }
func TestMoveWithMultipartCopy(t *testing.T) { func TestMoveWithMultipartCopy(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
@@ -792,9 +789,7 @@ func TestMoveWithMultipartCopy(t *testing.T) {
} }
func TestListObjectsV2(t *testing.T) { func TestListObjectsV2(t *testing.T) {
if skipS3() != "" { skipCheck(t)
t.Skip(skipS3())
}
rootDir := t.TempDir() rootDir := t.TempDir()
d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard) d, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
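
The skipCheck / newDriverConstructor / TestXxxDriverSuite / BenchmarkXxxDriverSuite shape above replaces the old testsuites.RegisterSuite registration for every driver. An illustrative sketch of the same wiring, run against the in-memory driver so the snippet is self-contained (inmemory.New comes from the existing tree, not from this diff; a remote driver would additionally gate both functions with a skipCheck helper that inspects its environment, as the S3 tests do):

```go
package example

import (
	"testing"

	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
	"github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
	"github.com/distribution/distribution/v3/registry/storage/driver/testsuites"
)

// newDriverConstructor returns the constructor the shared suite calls whenever
// it needs a fresh driver instance.
func newDriverConstructor(tb testing.TB) testsuites.DriverConstructor {
	tb.Helper()
	return func() (storagedriver.StorageDriver, error) {
		return inmemory.New(), nil
	}
}

func TestInMemoryDriverSuite(t *testing.T) {
	testsuites.Driver(t, newDriverConstructor(t))
}

func BenchmarkInMemoryDriverSuite(b *testing.B) {
	testsuites.BenchDriver(b, newDriverConstructor(b))
}
```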


@@ -74,6 +74,11 @@ type StorageDriver interface {
// Writer returns a FileWriter which will store the content written to it // Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit. // at the location designated by "path" after the call to Commit.
// A path may be appended to if it has not been committed, or if the
// existing committed content is zero length.
//
// The behavior of appending to paths with non-empty committed content is
// undefined. Specific implementations may document their own behavior.
Writer(ctx context.Context, path string, append bool) (FileWriter, error) Writer(ctx context.Context, path string, append bool) (FileWriter, error)
// Stat retrieves the FileInfo for the given path, including the current // Stat retrieves the FileInfo for the given path, including the current
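
Because the contract above leaves appends to non-empty committed content undefined, a generic caller can decide the append flag from a Stat call first. A hedged sketch using only methods documented in this interface file (Stat, Writer, and the PathNotFoundError type):

```go
package example

import (
	"context"
	"errors"
	"fmt"

	storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)

// openWriter chooses the append flag in line with the documented contract:
// append only when nothing is committed yet or the committed content is empty.
func openWriter(ctx context.Context, d storagedriver.StorageDriver, path string) (storagedriver.FileWriter, error) {
	fi, err := d.Stat(ctx, path)
	switch {
	case errors.As(err, new(storagedriver.PathNotFoundError)):
		// Nothing committed at this path yet: start a fresh writer.
		return d.Writer(ctx, path, false)
	case err != nil:
		return nil, err
	case fi.Size() == 0:
		// Zero-length committed content may still be appended to.
		return d.Writer(ctx, path, true)
	default:
		// Appending here is undefined by the interface; refuse rather than
		// depend on driver-specific behavior.
		return nil, fmt.Errorf("refusing to append to %s: %d bytes already committed", path, fi.Size())
	}
}
```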

File diff suppressed because it is too large

vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file

@@ -0,0 +1,145 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
type flag uintptr
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
panic("reflect.Value read-only flag has changed semantics")
}

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file

@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file

@@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
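
The ConfigState options above are what make spew output deterministic enough to diff, which is why testify (which renders assertion diffs with this package) pulls it in. A small illustrative sketch, not part of the vendored file, using only fields and methods defined above:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// A private ConfigState keeps test-oriented settings separate from the
	// global spew.Config used by the top-level functions.
	cs := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true, // stable map-key ordering across runs
		DisablePointerAddresses: true, // addresses differ between runs
		DisableCapacities:       true, // capacities are noise when diffing
	}
	cs.Dump(map[string][]int{"b": {2}, "a": {1}})
}
```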

vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew

vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file

@@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound:
d.w.Write(nilAngleBytes)
case cycleFound:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
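A minimal usage sketch of the entry points above (the point type and its values are invented for illustration): Dump writes the annotated form to standard out, Fdump targets any io.Writer, and Sdump captures the same output as a string.

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type point struct{ X, Y int }

func main() {
	p := &point{X: 1, Y: 2}
	spew.Dump(p)             // pointer is dereferenced; types and addresses are included
	spew.Fdump(os.Stderr, p) // identical output, sent to an arbitrary io.Writer
	_ = spew.Sdump(p)        // identical output returned as a string, useful in logs and tests
}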

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
View file

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different from empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were no other types at the time this code was written, but fall back
// to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
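A short sketch of the verb combinations listed above, using an invented map value: wrapping the value with NewFormatter lets plain fmt calls carry spew's extra detail, and the package-level convenience functions perform the wrapping automatically.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string]int{"a": 1}
	fmt.Printf("%v\n", spew.NewFormatter(v))   // most compact form
	fmt.Printf("%#+v\n", spew.NewFormatter(v)) // adds types and pointer addresses
	spew.Printf("%#v\n", v)                    // convenience wrapper, same effect as above
}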

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
View file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter.
// It returns the number of bytes written and any write error encountered.
// See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
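Because every wrapper above funnels its arguments through convertArgs, an existing fmt call site can be switched over mechanically; a small sketch with invented values:

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := struct{ Retries int }{Retries: 3}

	err := spew.Errorf("bad config: %v", cfg) // each argument is wrapped by NewFormatter
	fmt.Println(err)

	var buf bytes.Buffer
	spew.Fprintln(&buf, cfg) // the Fprint family applies the same substitution
	fmt.Print(buf.String())

	fmt.Println(spew.Sprintf("cfg=%+v", cfg))
}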

View file

@ -1,5 +0,0 @@
[568].out
_go*
_test*
_obj
/.idea

9
vendor/github.com/kr/pretty/Readme generated vendored
View file

@ -1,9 +0,0 @@
package pretty
import "github.com/kr/pretty"
Package pretty provides pretty-printing for Go values.
Documentation
http://godoc.org/github.com/kr/pretty

295
vendor/github.com/kr/pretty/diff.go generated vendored
View file

@ -1,295 +0,0 @@
package pretty
import (
"fmt"
"io"
"reflect"
)
type sbuf []string
func (p *sbuf) Printf(format string, a ...interface{}) {
s := fmt.Sprintf(format, a...)
*p = append(*p, s)
}
// Diff returns a slice where each element describes
// a difference between a and b.
func Diff(a, b interface{}) (desc []string) {
Pdiff((*sbuf)(&desc), a, b)
return desc
}
// wprintfer calls Fprintf on w for each Printf call
// with a trailing newline.
type wprintfer struct{ w io.Writer }
func (p *wprintfer) Printf(format string, a ...interface{}) {
fmt.Fprintf(p.w, format+"\n", a...)
}
// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
Pdiff(&wprintfer{w}, a, b)
}
type Printfer interface {
Printf(format string, a ...interface{})
}
// Pdiff prints to p a description of the differences between a and b.
// It calls Printf once for each difference, with no trailing newline.
// The standard library log.Logger is a Printfer.
func Pdiff(p Printfer, a, b interface{}) {
d := diffPrinter{
w: p,
aVisited: make(map[visit]visit),
bVisited: make(map[visit]visit),
}
d.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}
type Logfer interface {
Logf(format string, a ...interface{})
}
// logprintfer calls Fprintf on w for each Printf call
// with a trailing newline.
type logprintfer struct{ l Logfer }
func (p *logprintfer) Printf(format string, a ...interface{}) {
p.l.Logf(format, a...)
}
// Ldiff prints to l a description of the differences between a and b.
// It calls Logf once for each difference, with no trailing newline.
// The standard library testing.T and testing.B are Logfers.
func Ldiff(l Logfer, a, b interface{}) {
Pdiff(&logprintfer{l}, a, b)
}
type diffPrinter struct {
w Printfer
l string // label
aVisited map[visit]visit
bVisited map[visit]visit
}
func (w diffPrinter) printf(f string, a ...interface{}) {
var l string
if w.l != "" {
l = w.l + ": "
}
w.w.Printf(l+f, a...)
}
func (w diffPrinter) diff(av, bv reflect.Value) {
if !av.IsValid() && bv.IsValid() {
w.printf("nil != %# v", formatter{v: bv, quote: true})
return
}
if av.IsValid() && !bv.IsValid() {
w.printf("%# v != nil", formatter{v: av, quote: true})
return
}
if !av.IsValid() && !bv.IsValid() {
return
}
at := av.Type()
bt := bv.Type()
if at != bt {
w.printf("%v != %v", at, bt)
return
}
if av.CanAddr() && bv.CanAddr() {
avis := visit{av.UnsafeAddr(), at}
bvis := visit{bv.UnsafeAddr(), bt}
var cycle bool
// Have we seen this value before?
if vis, ok := w.aVisited[avis]; ok {
cycle = true
if vis != bvis {
w.printf("%# v (previously visited) != %# v", formatter{v: av, quote: true}, formatter{v: bv, quote: true})
}
} else if _, ok := w.bVisited[bvis]; ok {
cycle = true
w.printf("%# v != %# v (previously visited)", formatter{v: av, quote: true}, formatter{v: bv, quote: true})
}
w.aVisited[avis] = bvis
w.bVisited[bvis] = avis
if cycle {
return
}
}
switch kind := at.Kind(); kind {
case reflect.Bool:
if a, b := av.Bool(), bv.Bool(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if a, b := av.Int(), bv.Int(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if a, b := av.Uint(), bv.Uint(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Float32, reflect.Float64:
if a, b := av.Float(), bv.Float(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Complex64, reflect.Complex128:
if a, b := av.Complex(), bv.Complex(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Array:
n := av.Len()
for i := 0; i < n; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
if a, b := av.Pointer(), bv.Pointer(); a != b {
w.printf("%#x != %#x", a, b)
}
case reflect.Interface:
w.diff(av.Elem(), bv.Elem())
case reflect.Map:
ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
for _, k := range ak {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("%q != (missing)", av.MapIndex(k))
}
for _, k := range both {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.diff(av.MapIndex(k), bv.MapIndex(k))
}
for _, k := range bk {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("(missing) != %q", bv.MapIndex(k))
}
case reflect.Ptr:
switch {
case av.IsNil() && !bv.IsNil():
w.printf("nil != %# v", formatter{v: bv, quote: true})
case !av.IsNil() && bv.IsNil():
w.printf("%# v != nil", formatter{v: av, quote: true})
case !av.IsNil() && !bv.IsNil():
w.diff(av.Elem(), bv.Elem())
}
case reflect.Slice:
lenA := av.Len()
lenB := bv.Len()
if lenA != lenB {
w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
break
}
for i := 0; i < lenA; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.String:
if a, b := av.String(), bv.String(); a != b {
w.printf("%q != %q", a, b)
}
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
}
default:
panic("unknown reflect Kind: " + kind.String())
}
}
func (d diffPrinter) relabel(name string) (d1 diffPrinter) {
d1 = d
if d.l != "" && name[0] != '[' {
d1.l += "."
}
d1.l += name
return d1
}
// keyEqual compares a and b for equality.
// Both a and b must be valid map keys.
func keyEqual(av, bv reflect.Value) bool {
if !av.IsValid() && !bv.IsValid() {
return true
}
if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
return false
}
switch kind := av.Kind(); kind {
case reflect.Bool:
a, b := av.Bool(), bv.Bool()
return a == b
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := av.Int(), bv.Int()
return a == b
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := av.Uint(), bv.Uint()
return a == b
case reflect.Float32, reflect.Float64:
a, b := av.Float(), bv.Float()
return a == b
case reflect.Complex64, reflect.Complex128:
a, b := av.Complex(), bv.Complex()
return a == b
case reflect.Array:
for i := 0; i < av.Len(); i++ {
if !keyEqual(av.Index(i), bv.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
a, b := av.Pointer(), bv.Pointer()
return a == b
case reflect.Interface:
return keyEqual(av.Elem(), bv.Elem())
case reflect.String:
a, b := av.String(), bv.String()
return a == b
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
if !keyEqual(av.Field(i), bv.Field(i)) {
return false
}
}
return true
default:
panic("invalid map key type " + av.Type().String())
}
}
func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
for _, av := range a {
inBoth := false
for _, bv := range b {
if keyEqual(av, bv) {
inBoth = true
both = append(both, av)
break
}
}
if !inBoth {
ak = append(ak, av)
}
}
for _, bv := range b {
inBoth := false
for _, av := range a {
if keyEqual(av, bv) {
inBoth = true
break
}
}
if !inBoth {
bk = append(bk, bv)
}
}
return
}
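For reference, a typical call pattern for the diff helpers defined in this file (the user type and its values are invented): Diff returns one string per differing path, while Fdiff and Ldiff stream the same descriptions to an io.Writer or a testing.T.

package main

import (
	"fmt"
	"os"

	"github.com/kr/pretty"
)

type user struct {
	Name string
	Age  int
}

func main() {
	a := user{Name: "alice", Age: 30}
	b := user{Name: "alice", Age: 31}
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d) // one line per difference, here the Age field
	}
	pretty.Fdiff(os.Stdout, a, b) // same differences, written line by line
}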

View file

@ -1,355 +0,0 @@
package pretty
import (
"fmt"
"io"
"reflect"
"strconv"
"text/tabwriter"
"github.com/kr/text"
"github.com/rogpeppe/go-internal/fmtsort"
)
type formatter struct {
v reflect.Value
force bool
quote bool
}
// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
// fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
return formatter{v: reflect.ValueOf(x), quote: true}
}
func (fo formatter) String() string {
return fmt.Sprint(fo.v.Interface()) // unwrap it
}
func (fo formatter) passThrough(f fmt.State, c rune) {
s := "%"
for i := 0; i < 128; i++ {
if f.Flag(i) {
s += string(rune(i))
}
}
if w, ok := f.Width(); ok {
s += fmt.Sprintf("%d", w)
}
if p, ok := f.Precision(); ok {
s += fmt.Sprintf(".%d", p)
}
s += string(c)
fmt.Fprintf(f, s, fo.v.Interface())
}
func (fo formatter) Format(f fmt.State, c rune) {
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
p.printValue(fo.v, true, fo.quote)
w.Flush()
return
}
fo.passThrough(f, c)
}
type printer struct {
io.Writer
tw *tabwriter.Writer
visited map[visit]int
depth int
}
func (p *printer) indent() *printer {
q := *p
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
return &q
}
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
if showType {
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, "(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
}
// printValue must keep track of already-printed pointer values to avoid
// infinite recursion.
type visit struct {
v uintptr
typ reflect.Type
}
func (p *printer) catchPanic(v reflect.Value, method string) {
if r := recover(); r != nil {
if v.Kind() == reflect.Ptr && v.IsNil() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
return
}
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(PANIC=calling method ")
io.WriteString(p, strconv.Quote(method))
io.WriteString(p, ": ")
fmt.Fprint(p, r)
writeByte(p, ')')
}
}
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
if p.depth > 10 {
io.WriteString(p, "!%v(DEPTH EXCEEDED)")
return
}
if v.IsValid() && v.CanInterface() {
i := v.Interface()
if goStringer, ok := i.(fmt.GoStringer); ok {
defer p.catchPanic(v, "GoString")
io.WriteString(p, goStringer.GoString())
return
}
}
switch v.Kind() {
case reflect.Bool:
p.printInline(v, v.Bool(), showType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.printInline(v, v.Int(), showType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.printInline(v, v.Uint(), showType)
case reflect.Float32, reflect.Float64:
p.printInline(v, v.Float(), showType)
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(p, "%#v", v.Complex())
case reflect.String:
p.fmtString(v.String(), quote)
case reflect.Map:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
sm := fmtsort.Sort(v)
for i := 0; i < v.Len(); i++ {
k := sm.Key[i]
mv := sm.Value[i]
pp.printValue(k, false, true)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct := t.Elem().Kind() == reflect.Interface
pp.printValue(mv, showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Struct:
t := v.Type()
if v.CanAddr() {
addr := v.UnsafeAddr()
vis := visit{addr, t}
if vd, ok := p.visited[vis]; ok && vd < p.depth {
p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
break // don't print v again
}
p.visited[vis] = p.depth
}
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.NumField(); i++ {
showTypeInStruct := true
if f := t.Field(i); f.Name != "" {
io.WriteString(pp, f.Name)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = labelType(f.Type)
}
pp.printValue(getField(v, i), showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.NumField()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Interface:
switch e := v.Elem(); {
case e.Kind() == reflect.Invalid:
io.WriteString(p, "nil")
case e.IsValid():
pp := *p
pp.depth++
pp.printValue(e, showType, true)
default:
io.WriteString(p, v.Type().String())
io.WriteString(p, "(nil)")
}
case reflect.Array, reflect.Slice:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
if v.Kind() == reflect.Slice && v.IsNil() && showType {
io.WriteString(p, "(nil)")
break
}
if v.Kind() == reflect.Slice && v.IsNil() {
io.WriteString(p, "nil")
break
}
writeByte(p, '{')
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.Len(); i++ {
showTypeInSlice := t.Elem().Kind() == reflect.Interface
pp.printValue(v.Index(i), showTypeInSlice, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
writeByte(p, '}')
case reflect.Ptr:
e := v.Elem()
if !e.IsValid() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
} else {
pp := *p
pp.depth++
writeByte(pp, '&')
pp.printValue(e, true, true)
}
case reflect.Chan:
x := v.Pointer()
if showType {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, ")(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
case reflect.Func:
io.WriteString(p, v.Type().String())
io.WriteString(p, " {...}")
case reflect.UnsafePointer:
p.printInline(v, v.Pointer(), showType)
case reflect.Invalid:
io.WriteString(p, "nil")
}
}
func canInline(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map:
return !canExpand(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if canExpand(t.Field(i).Type) {
return false
}
}
return true
case reflect.Interface:
return false
case reflect.Array, reflect.Slice:
return !canExpand(t.Elem())
case reflect.Ptr:
return false
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
return false
}
return true
}
func canExpand(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map, reflect.Struct,
reflect.Interface, reflect.Array, reflect.Slice,
reflect.Ptr:
return true
}
return false
}
func labelType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Interface, reflect.Struct:
return true
}
return false
}
func (p *printer) fmtString(s string, quote bool) {
if quote {
s = strconv.Quote(s)
}
io.WriteString(p, s)
}
func writeByte(w io.Writer, b byte) {
w.Write([]byte{b})
}
func getField(v reflect.Value, i int) reflect.Value {
val := v.Field(i)
if val.Kind() == reflect.Interface && !val.IsNil() {
val = val.Elem()
}
return val
}
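A sketch of the "%# v" verb combination documented on Formatter above, using an invented config struct; with both the '#' and ' ' flags set the value is laid out across indented lines, and any other verb or flag combination falls through to package fmt.

package main

import (
	"fmt"

	"github.com/kr/pretty"
)

type config struct {
	Name  string
	Hosts []string
}

func main() {
	c := config{Name: "registry", Hosts: []string{"a", "b"}}
	fmt.Printf("%# v\n", pretty.Formatter(c)) // pretty multi-line rendering
	fmt.Printf("%#v\n", pretty.Formatter(c))  // handled by package fmt as usual
}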

108
vendor/github.com/kr/pretty/pretty.go generated vendored
View file

@ -1,108 +0,0 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
package pretty
import (
"fmt"
"io"
"log"
"reflect"
)
// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, wrap(a, false)...)
}
// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
return fmt.Fprintf(w, format, wrap(a, false)...)
}
// Log is a convenience wrapper for log.Printf.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
log.Print(wrap(a, true)...)
}
// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
log.Printf(format, wrap(a, false)...)
}
// Logln is a convenience wrapper for log.Printf.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
log.Println(wrap(a, true)...)
}
// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
return fmt.Print(wrap(a, true)...)
}
// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Printf(format, wrap(a, false)...)
}
// Println pretty-prints its operands and writes to standard output.
//
// Calling Println(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
return fmt.Println(wrap(a, true)...)
}
// Sprint is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprint(x, y) is equivalent to
// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Sprint(a ...interface{}) string {
return fmt.Sprint(wrap(a, true)...)
}
// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, wrap(a, false)...)
}
func wrap(a []interface{}, force bool) []interface{} {
w := make([]interface{}, len(a))
for i, x := range a {
w[i] = formatter{v: reflect.ValueOf(x), force: force}
}
return w
}

41
vendor/github.com/kr/pretty/zero.go generated vendored
View file

@ -1,41 +0,0 @@
package pretty
import (
"reflect"
)
func nonzero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() != 0
case reflect.Float32, reflect.Float64:
return v.Float() != 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() != complex(0, 0)
case reflect.String:
return v.String() != ""
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if nonzero(getField(v, i)) {
return true
}
}
return false
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if nonzero(v.Index(i)) {
return true
}
}
return false
case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
return !v.IsNil()
case reflect.UnsafePointer:
return v.Pointer() != 0
}
return true
}

19
vendor/github.com/kr/text/License generated vendored
View file

@ -1,19 +0,0 @@
Copyright 2012 Keith Rarick
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

3
vendor/github.com/kr/text/Readme generated vendored
View file

@ -1,3 +0,0 @@
This is a Go package for manipulating paragraphs of text.
See http://go.pkgdoc.org/github.com/kr/text for full documentation.

3
vendor/github.com/kr/text/doc.go generated vendored
View file

@ -1,3 +0,0 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text

74
vendor/github.com/kr/text/indent.go generated vendored
View file

@ -1,74 +0,0 @@
package text
import (
"io"
)
// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
return string(IndentBytes([]byte(s), []byte(prefix)))
}
// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
var res []byte
bol := true
for _, c := range b {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}
// Writer indents each line of its input.
type indentWriter struct {
w io.Writer
bol bool
pre [][]byte
sel int
off int
}
// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
return &indentWriter{
w: w,
pre: pre,
bol: true,
}
}
// The only errors returned are from the underlying indentWriter.
func (w *indentWriter) Write(p []byte) (n int, err error) {
for _, c := range p {
if w.bol {
var i int
i, err = w.w.Write(w.pre[w.sel][w.off:])
w.off += i
if err != nil {
return n, err
}
}
_, err = w.w.Write([]byte{c})
if err != nil {
return n, err
}
n++
w.bol = c == '\n'
if w.bol {
w.off = 0
if w.sel < len(w.pre)-1 {
w.sel++
}
}
}
return n, nil
}
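A small sketch of the two indentation entry points above (the prefixes are arbitrary): Indent transforms a whole string at once, while NewIndentWriter filters a stream and can use a different prefix for the first line.

package main

import (
	"fmt"
	"os"

	"github.com/kr/text"
)

func main() {
	fmt.Print(text.Indent("first\nsecond\n", "    ")) // every non-empty line gains the prefix

	w := text.NewIndentWriter(os.Stdout, []byte("> "), []byte("  "))
	fmt.Fprint(w, "quoted\ncontinuation\nlines\n") // "> " on the first line, "  " afterwards
}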

86
vendor/github.com/kr/text/wrap.go generated vendored
View file

@ -1,86 +0,0 @@
package text
import (
"bytes"
"math"
)
var (
nl = []byte{'\n'}
sp = []byte{' '}
)
const defaultPenalty = 1e5
// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
func Wrap(s string, lim int) string {
return string(WrapBytes([]byte(s), lim))
}
// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
var lines [][]byte
for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
lines = append(lines, bytes.Join(line, sp))
}
return bytes.Join(lines, nl)
}
// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
n := len(words)
length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = len(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + len(words[j])
}
}
nbrk := make([]int, n)
cost := make([]int, n)
for i := range cost {
cost[i] = math.MaxInt32
}
for i := n - 1; i >= 0; i-- {
if length[i][n-1] <= lim || i == n-1 {
cost[i] = 0
nbrk[i] = n
} else {
for j := i + 1; j < n; j++ {
d := lim - length[i][j-1]
c := d*d + cost[j]
if length[i][j-1] > lim {
c += pen // too-long lines get a worse penalty
}
if c < cost[i] {
cost[i] = c
nbrk[i] = j
}
}
}
}
var lines [][][]byte
i := 0
for i < n {
lines = append(lines, words[i:nbrk[i]])
i = nbrk[i]
}
return lines
}
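A sketch of the wrapping helpers above on an arbitrary sentence: Wrap re-breaks it into lines of roughly lim bytes, choosing break points that minimise the squared slack described in the WrapWords comment, and WrapWords exposes the same algorithm on pre-split words.

package main

import (
	"bytes"
	"fmt"

	"github.com/kr/text"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog"
	fmt.Println(text.Wrap(s, 16))

	words := [][]byte{[]byte("alpha"), []byte("beta"), []byte("gamma")}
	for _, line := range text.WrapWords(words, 1, 11, 1e5) {
		fmt.Println(string(bytes.Join(line, []byte(" "))))
	}
}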

27
vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

772
vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file
View file

@ -0,0 +1,772 @@
// Package difflib is a partial port of Python difflib module.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// The following class and functions have been ported:
//
// - SequenceMatcher
//
// - unified_diff
//
// - context_diff
//
// Getting unified diffs was the main goal of the port. Keep in mind this code
// is mostly suitable for showing text differences in a human-friendly way; there
// are no guarantees that generated diffs are consumable by patch(1).
package difflib
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func calculateRatio(matches, length int) float64 {
if length > 0 {
return 2.0 * float64(matches) / float64(length)
}
return 1.0
}
type Match struct {
A int
B int
Size int
}
type OpCode struct {
Tag byte
I1 int
I2 int
J1 int
J2 int
}
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches peoples' eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
a []string
b []string
b2j map[string][]int
IsJunk func(string) bool
autoJunk bool
bJunk map[string]struct{}
matchingBlocks []Match
fullBCount map[string]int
bPopular map[string]struct{}
opCodes []OpCode
}
func NewMatcher(a, b []string) *SequenceMatcher {
m := SequenceMatcher{autoJunk: true}
m.SetSeqs(a, b)
return &m
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
}
// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
m.SetSeq1(a)
m.SetSeq2(b)
}
// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
if &a == &m.a {
return
}
m.a = a
m.matchingBlocks = nil
m.opCodes = nil
}
// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
if &b == &m.b {
return
}
m.b = b
m.matchingBlocks = nil
m.opCodes = nil
m.fullBCount = nil
m.chainB()
}
func (m *SequenceMatcher) chainB() {
// Populate line -> index mapping
b2j := map[string][]int{}
for i, s := range m.b {
indices := b2j[s]
indices = append(indices, i)
b2j[s] = indices
}
// Purge junk elements
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s := range junk {
delete(b2j, s)
}
}
// Purge remaining popular elements
popular := map[string]struct{}{}
n := len(m.b)
if m.autoJunk && n >= 200 {
ntest := n/100 + 1
for s, indices := range b2j {
if len(indices) > ntest {
popular[s] = struct{}{}
}
}
for s := range popular {
delete(b2j, s)
}
}
m.bPopular = popular
m.b2j = b2j
}
func (m *SequenceMatcher) isBJunk(s string) bool {
_, ok := m.bJunk[s]
return ok
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
// CAUTION: stripping common prefix or suffix would be incorrect.
// E.g.,
// ab
// acab
// Longest matching block is "ab", but if common prefix is
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
// strip, so ends up claiming that ab is changed to acab by
// inserting "ca" in the middle. That's minimal but unintuitive:
// "it's obvious" that someone inserted "ac" at the front.
// Windiff ends up at the same place as diff, but by pairing up
// the unique 'b's and then matching the first two 'a's.
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
// during an iteration of the loop, j2len[j] = length of longest
// junk-free match ending with a[i-1] and b[j]
j2len := map[int]int{}
for i := alo; i != ahi; i++ {
// look at all instances of a[i] in b; note that because
// b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len := map[int]int{}
for _, j := range m.b2j[m.a[i]] {
// a[i] matches b[j]
if j < blo {
continue
}
if j >= bhi {
break
}
k := j2len[j-1] + 1
newj2len[j] = k
if k > bestsize {
besti, bestj, bestsize = i-k+1, j-k+1, k
}
}
j2len = newj2len
}
// Extend the best by non-junk elements on each end. In particular,
// "popular" non-junk elements aren't in b2j, which greatly speeds
// the inner loop above, but also means "the best" match so far
// doesn't contain any junk *or* popular non-junk elements.
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
// Now that we have a wholly interesting match (albeit possibly
// empty!), we may as well suck up the matching junk on each
// side of it too. Can't think of a good reason not to, and it
// saves post-processing the (possibly considerable) expense of
// figuring out what to do with it. In the case of an empty
// interesting match, this is clearly the right thing to do,
// because no other kind of match is possible in the regions.
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
return Match{A: besti, B: bestj, Size: bestsize}
}
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
if m.matchingBlocks != nil {
return m.matchingBlocks
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
match := m.findLongestMatch(alo, ahi, blo, bhi)
i, j, k := match.A, match.B, match.Size
if match.Size > 0 {
if alo < i && blo < j {
matched = matchBlocks(alo, i, blo, j, matched)
}
matched = append(matched, match)
if i+k < ahi && j+k < bhi {
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
}
}
return matched
}
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
// matching_blocks list now.
nonAdjacent := []Match{}
i1, j1, k1 := 0, 0, 0
for _, b := range matched {
// Is this block adjacent to i1, j1, k1?
i2, j2, k2 := b.A, b.B, b.Size
if i1+k1 == i2 && j1+k1 == j2 {
// Yes, so collapse them -- this just increases the length of
// the first block by the length of the second, and the first
// block so lengthened remains the block to compare against.
k1 += k2
} else {
// Not adjacent. Remember the first block (k1==0 means it's
// the dummy we started with), and make the second block the
// new block to compare against.
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
i1, j1, k1 = i2, j2, k2
}
}
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
m.matchingBlocks = nonAdjacent
return m.matchingBlocks
}
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
if m.opCodes != nil {
return m.opCodes
}
i, j := 0, 0
matching := m.GetMatchingBlocks()
opCodes := make([]OpCode, 0, len(matching))
for _, m := range matching {
// invariant: we've pumped out correct diffs to change
// a[:i] into b[:j], and the next matching block is
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
// out a diff to change a[i:ai] into b[j:bj], pump out
// the matching block, and move (i,j) beyond the match
ai, bj, size := m.A, m.B, m.Size
tag := byte(0)
if i < ai && j < bj {
tag = 'r'
} else if i < ai {
tag = 'd'
} else if j < bj {
tag = 'i'
}
if tag > 0 {
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
}
i, j = ai+size, bj+size
// the list of matching blocks is terminated by a
// sentinel with size 0
if size > 0 {
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
}
}
m.opCodes = opCodes
return m.opCodes
}
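// exampleOpCodes is an illustrative sketch, not part of the ported difflib
// API: it shows how the tags documented above describe turning one slice of
// lines into another.
func exampleOpCodes() {
	a := []string{"one", "two", "three"}
	b := []string{"one", "2", "three", "four"}
	m := NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Prints, in order: 'e' (keep "one"), 'r' (replace "two" with "2"),
		// 'e' (keep "three"), 'i' (insert "four").
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}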
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if n < 0 {
n = 3
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
group := []OpCode{}
for _, c := range codes {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
groups = append(groups, group)
}
return groups
}
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
matches := 0
for _, m := range m.GetMatchingBlocks() {
matches += m.Size
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
// viewing a and b as multisets, set matches to the cardinality
// of their intersection; this counts the number of matches
// without regard to order, so is clearly an upper bound
if m.fullBCount == nil {
m.fullBCount = map[string]int{}
for _, s := range m.b {
m.fullBCount[s] = m.fullBCount[s] + 1
}
}
// avail[x] is the number of times x appears in 'b' less the
// number of times we've seen it in 'a' so far ... kinda
avail := map[string]int{}
matches := 0
for _, s := range m.a {
n, ok := avail[s]
if !ok {
n = m.fullBCount[s]
}
avail[s] = n - 1
if n > 0 {
matches += 1
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
}
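A short, hedged in-package sketch of the intended relationship between the three ratios (exampleRatios is illustrative and uses the SplitLines helper defined later in this file): QuickRatio and RealQuickRatio are cheap upper bounds on Ratio.
// exampleRatios is an illustrative helper, not part of the upstream library.
func exampleRatios() {
	m := NewMatcher(SplitLines("a\nb\nc\n"), SplitLines("a\nB\nc\n"))
	// Both cheaper ratios bound Ratio from above.
	fmt.Println(m.Ratio() <= m.QuickRatio())          // true
	fmt.Println(m.QuickRatio() <= m.RealQuickRatio()) // true
}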
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
// Unified diff parameters
type UnifiedDiff struct {
A []string // First sequence lines
FromFile string // First file name
FromDate string // First file time
B []string // Second sequence lines
ToFile string // Second file name
ToDate string // Second file time
Eol string // Headers end of line, defaults to LF
Context int // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
ws := func(s string) error {
_, err := buf.WriteString(s)
return err
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
if err != nil {
return err
}
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
if err != nil {
return err
}
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
if err := ws(" " + line); err != nil {
return err
}
}
continue
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
if err := ws("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
if err := ws("+" + line); err != nil {
return err
}
}
}
}
}
return nil
}
// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
}
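For orientation, a hedged usage sketch of the writer above from inside this package (exampleUnifiedDiff is an illustrative name; SplitLines is defined further down in this file).
// exampleUnifiedDiff is an illustrative helper, not part of the upstream library.
func exampleUnifiedDiff() {
	diff := UnifiedDiff{
		A:        SplitLines("one\ntwo\nthree\n"),
		B:        SplitLines("one\n2\nthree\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3,
	}
	// GetUnifiedDiffString wraps WriteUnifiedDiff and returns the rendered diff.
	text, err := GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}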
// Convert range to the "ed" format.
func formatRangeContext(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
if length <= 1 {
return fmt.Sprintf("%d", beginning)
}
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
wf := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
ws := func(s string) {
_, err := buf.WriteString(s)
if diffErr == nil && err != nil {
diffErr = err
}
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
prefix := map[byte]string{
'i': "+ ",
'd': "- ",
'r': "! ",
'e': " ",
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
}
}
first, last := g[0], g[len(g)-1]
ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
wf("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
if cc.Tag == 'i' {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
range2 := formatRangeContext(first.J1, last.J2)
wf("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
if cc.Tag == 'd' {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
}
return diffErr
}
// Like WriteContextDiff but returns the diff as a string.
func GetContextDiffString(diff ContextDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteContextDiff(w, diff)
return string(w.Bytes()), err
}
// Split a string on "\n" while preserving them. The output can be used
// as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
lines := strings.SplitAfter(s, "\n")
lines[len(lines)-1] += "\n"
return lines
}
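A tiny illustrative note on SplitLines: it preserves each line's terminator and force-appends one to the final element, so the diff writers above can emit every line verbatim. Sketch (hypothetical helper):
// exampleSplitLines is an illustrative helper, not part of the upstream library.
func exampleSplitLines() {
	lines := SplitLines("a\nb")
	// lines is ["a\n", "b\n"]: the missing final newline is added so every
	// element can be written out unchanged by WriteUnifiedDiff/WriteContextDiff.
	_ = lines
}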

View file

@@ -1,27 +0,0 @@
Copyright (c) 2018 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,20 +0,0 @@
package fmtsort
import "reflect"
const brokenNaNs = false
func mapElems(mapValue reflect.Value) ([]reflect.Value, []reflect.Value) {
// Note: this code is arranged to not panic even in the presence
// of a concurrent map update. The runtime is responsible for
// yelling loudly if that happens. See issue 33275.
n := mapValue.Len()
key := make([]reflect.Value, 0, n)
value := make([]reflect.Value, 0, n)
iter := mapValue.MapRange()
for iter.Next() {
key = append(key, iter.Key())
value = append(value, iter.Value())
}
return key, value
}

View file

@@ -1,209 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fmtsort provides a general stable ordering mechanism
// for maps, on behalf of the fmt and text/template packages.
// It is not guaranteed to be efficient and works only for types
// that are valid map keys.
package fmtsort
import (
"reflect"
"sort"
)
// Note: Throughout this package we avoid calling reflect.Value.Interface as
// it is not always legal to do so and it's easier to avoid the issue than to face it.
// SortedMap represents a map's keys and values. The keys and values are
// aligned in index order: Value[i] is the value in the map corresponding to Key[i].
type SortedMap struct {
Key []reflect.Value
Value []reflect.Value
}
func (o *SortedMap) Len() int { return len(o.Key) }
func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 }
func (o *SortedMap) Swap(i, j int) {
o.Key[i], o.Key[j] = o.Key[j], o.Key[i]
o.Value[i], o.Value[j] = o.Value[j], o.Value[i]
}
// Sort accepts a map and returns a SortedMap that has the same keys and
// values but in a stable sorted order according to the keys, modulo issues
// raised by unorderable key values such as NaNs.
//
// The ordering rules are more general than with Go's < operator:
//
// - when applicable, nil compares low
// - ints, floats, and strings order by <
// - NaN compares less than non-NaN floats
// - bool compares false before true
// - complex compares real, then imag
// - pointers compare by machine address
// - channel values compare by machine address
// - structs compare each field in turn
// - arrays compare each element in turn.
// Otherwise identical arrays compare by length.
// - interface values compare first by reflect.Type describing the concrete type
// and then by concrete value as described in the previous rules.
func Sort(mapValue reflect.Value) *SortedMap {
if mapValue.Type().Kind() != reflect.Map {
return nil
}
key, value := mapElems(mapValue)
sorted := &SortedMap{
Key: key,
Value: value,
}
sort.Stable(sorted)
return sorted
}
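A hedged in-package sketch of how Sort is meant to be driven (exampleSort is an illustrative name; this package is internal to fmt and text/template, so ordinary code would not import it directly).
// exampleSort is an illustrative helper, not part of the original file.
func exampleSort() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	sm := Sort(reflect.ValueOf(m))
	// Keys come back in a stable sorted order, values aligned by index.
	for i := range sm.Key {
		_ = sm.Key[i].String() // "a", "b", "c"
		_ = sm.Value[i].Int()  // 1, 2, 3
	}
}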
// compare compares two values of the same type. It returns -1, 0, 1
// according to whether a > b (1), a == b (0), or a < b (-1).
// If the types differ, it returns -1.
// See the comment on Sort for the comparison rules.
func compare(aVal, bVal reflect.Value) int {
aType, bType := aVal.Type(), bVal.Type()
if aType != bType {
return -1 // No good answer possible, but don't return 0: they're not equal.
}
switch aVal.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := aVal.Int(), bVal.Int()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := aVal.Uint(), bVal.Uint()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.String:
a, b := aVal.String(), bVal.String()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Float32, reflect.Float64:
return floatCompare(aVal.Float(), bVal.Float())
case reflect.Complex64, reflect.Complex128:
a, b := aVal.Complex(), bVal.Complex()
if c := floatCompare(real(a), real(b)); c != 0 {
return c
}
return floatCompare(imag(a), imag(b))
case reflect.Bool:
a, b := aVal.Bool(), bVal.Bool()
switch {
case a == b:
return 0
case a:
return 1
default:
return -1
}
case reflect.Ptr:
a, b := aVal.Pointer(), bVal.Pointer()
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
case reflect.Chan:
if c, ok := nilCompare(aVal, bVal); ok {
return c
}
ap, bp := aVal.Pointer(), bVal.Pointer()
switch {
case ap < bp:
return -1
case ap > bp:
return 1
default:
return 0
}
case reflect.Struct:
for i := 0; i < aVal.NumField(); i++ {
if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 {
return c
}
}
return 0
case reflect.Array:
for i := 0; i < aVal.Len(); i++ {
if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 {
return c
}
}
return 0
case reflect.Interface:
if c, ok := nilCompare(aVal, bVal); ok {
return c
}
c := compare(reflect.ValueOf(aVal.Elem().Type()), reflect.ValueOf(bVal.Elem().Type()))
if c != 0 {
return c
}
return compare(aVal.Elem(), bVal.Elem())
default:
// Certain types cannot appear as keys (maps, funcs, slices), but be explicit.
panic("bad type in compare: " + aType.String())
}
}
// nilCompare checks whether either value is nil. If not, the boolean is false.
// If either value is nil, the boolean is true and the integer is the comparison
// value. The comparison is defined to be 0 if both are nil, otherwise the one
// nil value compares low. Both arguments must represent a chan, func,
// interface, map, pointer, or slice.
func nilCompare(aVal, bVal reflect.Value) (int, bool) {
if aVal.IsNil() {
if bVal.IsNil() {
return 0, true
}
return -1, true
}
if bVal.IsNil() {
return 1, true
}
return 0, false
}
// floatCompare compares two floating-point values. NaNs compare low.
func floatCompare(a, b float64) int {
switch {
case isNaN(a):
return -1 // No good answer if b is a NaN so don't bother checking.
case isNaN(b):
return 1
case a < b:
return -1
case a > b:
return 1
}
return 0
}
func isNaN(a float64) bool {
return a != a
}
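To make the NaN rule explicit, a small illustrative helper (not part of the original file) showing that floatCompare always sends NaN to the low end, even against another NaN.
// exampleFloatCompare is an illustrative helper, not part of the original file.
func exampleFloatCompare() {
	zero := 0.0
	nan := zero / zero         // produce a NaN at runtime without importing math
	_ = floatCompare(nan, 1.0) // -1: NaN compares low
	_ = floatCompare(1.0, nan) // +1
	_ = floatCompare(nan, nan) // -1: two NaNs still do not compare equal here
}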

View file

@@ -1,4 +1,6 @@
Copyright 2012 Keith Rarick
MIT License
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -7,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -0,0 +1,458 @@
package assert
import (
"bytes"
"fmt"
"reflect"
"time"
)
type CompareType int
const (
compareLess CompareType = iota - 1
compareEqual
compareGreater
)
var (
intType = reflect.TypeOf(int(1))
int8Type = reflect.TypeOf(int8(1))
int16Type = reflect.TypeOf(int16(1))
int32Type = reflect.TypeOf(int32(1))
int64Type = reflect.TypeOf(int64(1))
uintType = reflect.TypeOf(uint(1))
uint8Type = reflect.TypeOf(uint8(1))
uint16Type = reflect.TypeOf(uint16(1))
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
timeType = reflect.TypeOf(time.Time{})
bytesType = reflect.TypeOf([]byte{})
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
// throughout this switch we try and avoid calling .Convert() if possible,
// as this has a pretty big performance impact
switch kind {
case reflect.Int:
{
intobj1, ok := obj1.(int)
if !ok {
intobj1 = obj1Value.Convert(intType).Interface().(int)
}
intobj2, ok := obj2.(int)
if !ok {
intobj2 = obj2Value.Convert(intType).Interface().(int)
}
if intobj1 > intobj2 {
return compareGreater, true
}
if intobj1 == intobj2 {
return compareEqual, true
}
if intobj1 < intobj2 {
return compareLess, true
}
}
case reflect.Int8:
{
int8obj1, ok := obj1.(int8)
if !ok {
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
}
int8obj2, ok := obj2.(int8)
if !ok {
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
}
if int8obj1 > int8obj2 {
return compareGreater, true
}
if int8obj1 == int8obj2 {
return compareEqual, true
}
if int8obj1 < int8obj2 {
return compareLess, true
}
}
case reflect.Int16:
{
int16obj1, ok := obj1.(int16)
if !ok {
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
}
int16obj2, ok := obj2.(int16)
if !ok {
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
}
if int16obj1 > int16obj2 {
return compareGreater, true
}
if int16obj1 == int16obj2 {
return compareEqual, true
}
if int16obj1 < int16obj2 {
return compareLess, true
}
}
case reflect.Int32:
{
int32obj1, ok := obj1.(int32)
if !ok {
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
}
int32obj2, ok := obj2.(int32)
if !ok {
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
}
if int32obj1 > int32obj2 {
return compareGreater, true
}
if int32obj1 == int32obj2 {
return compareEqual, true
}
if int32obj1 < int32obj2 {
return compareLess, true
}
}
case reflect.Int64:
{
int64obj1, ok := obj1.(int64)
if !ok {
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
}
int64obj2, ok := obj2.(int64)
if !ok {
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
}
if int64obj1 > int64obj2 {
return compareGreater, true
}
if int64obj1 == int64obj2 {
return compareEqual, true
}
if int64obj1 < int64obj2 {
return compareLess, true
}
}
case reflect.Uint:
{
uintobj1, ok := obj1.(uint)
if !ok {
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
}
uintobj2, ok := obj2.(uint)
if !ok {
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
}
if uintobj1 > uintobj2 {
return compareGreater, true
}
if uintobj1 == uintobj2 {
return compareEqual, true
}
if uintobj1 < uintobj2 {
return compareLess, true
}
}
case reflect.Uint8:
{
uint8obj1, ok := obj1.(uint8)
if !ok {
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
}
uint8obj2, ok := obj2.(uint8)
if !ok {
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
}
if uint8obj1 > uint8obj2 {
return compareGreater, true
}
if uint8obj1 == uint8obj2 {
return compareEqual, true
}
if uint8obj1 < uint8obj2 {
return compareLess, true
}
}
case reflect.Uint16:
{
uint16obj1, ok := obj1.(uint16)
if !ok {
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
}
uint16obj2, ok := obj2.(uint16)
if !ok {
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
}
if uint16obj1 > uint16obj2 {
return compareGreater, true
}
if uint16obj1 == uint16obj2 {
return compareEqual, true
}
if uint16obj1 < uint16obj2 {
return compareLess, true
}
}
case reflect.Uint32:
{
uint32obj1, ok := obj1.(uint32)
if !ok {
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
}
uint32obj2, ok := obj2.(uint32)
if !ok {
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
}
if uint32obj1 > uint32obj2 {
return compareGreater, true
}
if uint32obj1 == uint32obj2 {
return compareEqual, true
}
if uint32obj1 < uint32obj2 {
return compareLess, true
}
}
case reflect.Uint64:
{
uint64obj1, ok := obj1.(uint64)
if !ok {
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
}
uint64obj2, ok := obj2.(uint64)
if !ok {
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
}
if uint64obj1 > uint64obj2 {
return compareGreater, true
}
if uint64obj1 == uint64obj2 {
return compareEqual, true
}
if uint64obj1 < uint64obj2 {
return compareLess, true
}
}
case reflect.Float32:
{
float32obj1, ok := obj1.(float32)
if !ok {
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
}
float32obj2, ok := obj2.(float32)
if !ok {
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
}
if float32obj1 > float32obj2 {
return compareGreater, true
}
if float32obj1 == float32obj2 {
return compareEqual, true
}
if float32obj1 < float32obj2 {
return compareLess, true
}
}
case reflect.Float64:
{
float64obj1, ok := obj1.(float64)
if !ok {
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
}
float64obj2, ok := obj2.(float64)
if !ok {
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
}
if float64obj1 > float64obj2 {
return compareGreater, true
}
if float64obj1 == float64obj2 {
return compareEqual, true
}
if float64obj1 < float64obj2 {
return compareLess, true
}
}
case reflect.String:
{
stringobj1, ok := obj1.(string)
if !ok {
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
}
stringobj2, ok := obj2.(string)
if !ok {
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
}
if stringobj1 > stringobj2 {
return compareGreater, true
}
if stringobj1 == stringobj2 {
return compareEqual, true
}
if stringobj1 < stringobj2 {
return compareLess, true
}
}
// Check for known struct types whose values we know how to compare.
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
if !canConvert(obj1Value, timeType) {
break
}
// time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
}
timeObj2, ok := obj2.(time.Time)
if !ok {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
}
case reflect.Slice:
{
// We only care about the []byte type.
if !canConvert(obj1Value, bytesType) {
break
}
// []byte can be compared!
bytesObj1, ok := obj1.([]byte)
if !ok {
bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte)
}
bytesObj2, ok := obj2.([]byte)
if !ok {
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
}
}
return compareEqual, false
}
// Greater asserts that the first element is greater than the second
//
// assert.Greater(t, 2, 1)
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqual(t, 2, 1)
// assert.GreaterOrEqual(t, 2, 2)
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
//
// assert.Less(t, 1, 2)
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
//
// assert.LessOrEqual(t, 1, 2)
// assert.LessOrEqual(t, 2, 2)
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
//
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
//
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
}
return true
}
func containsValue(values []CompareType, value CompareType) bool {
for _, v := range values {
if v == value {
return true
}
}
return false
}
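A hedged usage sketch of the ordering assertions above, written as a separate test file (the test name is hypothetical); TestingT is satisfied by *testing.T, and both operands must share the same reflect.Kind or the assertion fails with "Elements should be the same type".
package assert_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestOrderingSketch is an illustrative example, not part of testify.
func TestOrderingSketch(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, 1.5, 2.5)
	assert.LessOrEqual(t, 2, 2)
	assert.Positive(t, 42)
	assert.Negative(t, -3)
}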

View file

@@ -0,0 +1,16 @@
//go:build go1.17
// +build go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go
package assert
import "reflect"
// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
}

View file

@@ -0,0 +1,16 @@
//go:build !go1.17
// +build !go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go
package assert
import "reflect"
// Older versions of Go do not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
return false
}

View file

@@ -0,0 +1,805 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Conditionf uses a Comparison to assert a complex condition.
func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Condition(t, comp, append([]interface{}{msg}, args...)...)
}
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// DirExistsf checks whether a directory exists in the given path. It also fails
// if the path is a file rather than a directory or there is an error checking whether it exists.
func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return DirExists(t, path, append([]interface{}{msg}, args...)...)
}
// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
// the number of appearances of each of them in both lists should match.
//
// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Empty(t, object, append([]interface{}{msg}, args...)...)
}
// Equalf asserts that two objects are equal.
//
// assert.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
// cannot be determined and will always fail.
func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
}
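A brief, hedged sketch of how these formatted wrappers read in a table-driven test (a hypothetical test, not from the upstream docs); the msg/args pair only surfaces in the failure output.
package assert_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestEqualfSketch is an illustrative example, not part of testify.
func TestEqualfSketch(t *testing.T) {
	cases := []struct{ in, want int }{{1, 1}, {2, 2}}
	for i, tc := range cases {
		// The format arguments are appended to the message only on failure.
		assert.Equalf(t, tc.want, tc.in, "case %d: unexpected value", i)
	}
}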
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
}
// EqualExportedValuesf asserts that the types of two objects are equal and their public
// fields are also equal. This is useful for comparing structs that have private fields
// that could potentially differ.
//
// type S struct {
// Exported int
// notExported int
// }
// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
// actualObj, err := SomeFunction()
// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// Eventuallyf asserts that the given condition will be met within waitFor time,
// periodically checking the target function each tick.
//
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// EventuallyWithTf asserts that the given condition will be met within waitFor time,
// periodically checking the target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
// function can use the CollectT to call other assertions.
// The condition is considered "met" if no errors are raised in a tick.
// The supplied CollectT collects all errors from one tick (if there are any).
// If the condition is not met before waitFor, the collected errors of
// the last tick are copied to t.
//
// externalValue := false
// go func() {
// time.Sleep(8*time.Millisecond)
// externalValue = true
// }()
// assert.EventuallyWithTf(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
// }, 1*time.Second, 10*time.Millisecond, "error message %s", "formatted")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
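A runnable, hedged sketch of EventuallyWithTf as a separate test file (test name and timings are illustrative): a goroutine flips a flag after a short delay, and each tick re-runs the CollectT assertions until they pass or waitFor expires.
package assert_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestEventuallyWithTfSketch is an illustrative example, not part of testify.
func TestEventuallyWithTfSketch(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond)
		ready.Store(true)
	}()
	assert.EventuallyWithTf(t, func(c *assert.CollectT) {
		// Any failure here only fails this tick; the condition is retried.
		assert.True(c, ready.Load(), "flag not flipped yet")
	}, time.Second, 10*time.Millisecond, "flag never flipped within %v", time.Second)
}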
// Exactlyf asserts that two objects are equal in value and type.
//
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Failf reports a failure with the given failure message.
func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// FailNowf fails the test immediately.
func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
}
// Falsef asserts that the specified value is false.
//
// assert.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return False(t, value, append([]interface{}{msg}, args...)...)
}
// FileExistsf checks whether a file exists in the given path. It also fails if
// the path points to a directory or there is an error when trying to check the file.
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return FileExists(t, path, append([]interface{}{msg}, args...)...)
}
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
}
// HTTPErrorf asserts that a specified handler returns an error status code.
//
// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
}
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
}
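A hedged sketch of the HTTP helpers above against a trivial handler (handler and test names are made up for illustration); the handler is invoked in-process, so no server is started.
package assert_test

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// pingHandler and TestHTTPSketch are illustrative examples, not part of testify.
func pingHandler(w http.ResponseWriter, _ *http.Request) {
	_, _ = w.Write([]byte("pong"))
}

func TestHTTPSketch(t *testing.T) {
	assert.HTTPSuccessf(t, pingHandler, "GET", "/ping", url.Values{}, "expected 2xx from %s", "/ping")
	assert.HTTPBodyContainsf(t, pingHandler, "GET", "/ping", nil, "pong", "body should contain %q", "pong")
}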
// Implementsf asserts that the object implements the specified interface.
//
// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}
// InDeltaf asserts that the two numerals are within delta of each other.
//
// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InDeltaSlicef is the same as InDelta, except it compares two slices.
func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// InEpsilonf asserts that expected and actual have a relative error less than epsilon
func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsIncreasingf asserts that the collection is increasing
//
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
}
// JSONEqf asserts that two JSON strings are equivalent.
//
// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Len(t, object, length, append([]interface{}{msg}, args...)...)
}
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// LessOrEqualf asserts that the first element is less than or equal to the second
//
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// Negativef asserts that the specified element is negative
//
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Negative(t, e, append([]interface{}{msg}, args...)...)
}
// Neverf asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Nil(t, object, append([]interface{}{msg}, args...)...)
}
// NoDirExistsf checks whether a directory does not exist in the given path.
// It fails if the path points to an existing _directory_ only.
func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
}
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if assert.NoErrorf(t, err, "error message %s", "formatted") {
// assert.Equal(t, expectedObj, actualObj)
// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoError(t, err, append([]interface{}{msg}, args...)...)
}
// NoFileExistsf checks whether a file does not exist in a given path. It fails
// if the path points to an existing _file_ only.
func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
}
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
}
// NotEqualf asserts that the specified values are NOT equal.
//
// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotNil(t, object, append([]interface{}{msg}, args...)...)
}
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotPanics(t, f, append([]interface{}{msg}, args...)...)
}
// NotRegexpf asserts that a specified regexp does not match a string.
//
// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// NotSamef asserts that two pointers do not reference the same object.
//
// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotSubsetf asserts that the specified list(array, slice...) does not contain all
// elements given in the specified subset(array, slice...).
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// NotZerof asserts that i is not the zero value for its type.
func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotZero(t, i, append([]interface{}{msg}, args...)...)
}
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Panics(t, f, append([]interface{}{msg}, args...)...)
}
// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
}
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
// Positivef asserts that the specified element is positive
//
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Positive(t, e, append([]interface{}{msg}, args...)...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// Samef asserts that two pointers reference the same object.
//
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
}
// Truef asserts that the specified value is true.
//
// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return True(t, value, append([]interface{}{msg}, args...)...)
}
// WithinDurationf asserts that the two times are within duration delta of each other.
//
// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
}
// WithinRangef asserts that a time is within a time range (inclusive).
//
// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Zerof asserts that i is the zero value for its type.
func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Zero(t, i, append([]interface{}{msg}, args...)...)
}

View file

@ -0,0 +1,5 @@
{{.CommentFormat}}
func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
if h, ok := t.(tHelper); ok { h.Helper() }
return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
}

File diff suppressed because it is too large

View file

@ -0,0 +1,5 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
if h, ok := a.t.(tHelper); ok { h.Helper() }
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
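// A rough sketch (illustrative, not part of this vendored file) of what the
// template above expands to for the Equal assertion, after gofmt; the actual
// output lives in the generated assertion_forward.go:
//
//	// Equal asserts that two objects are equal.
//	//
//	//	a.Equal(123, 123)
//	func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
//	    if h, ok := a.t.(tHelper); ok {
//	        h.Helper()
//	    }
//	    return Equal(a.t, expected, actual, msgAndArgs...)
//	}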

View file

@ -0,0 +1,81 @@
package assert
import (
"fmt"
"reflect"
)
// isOrdered checks that collection contains orderable elements.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
}
objValue := reflect.ValueOf(object)
objLen := objValue.Len()
if objLen <= 1 {
return true
}
value := objValue.Index(0)
valueInterface := value.Interface()
firstValueKind := value.Kind()
for i := 1; i < objLen; i++ {
prevValue := value
prevValueInterface := valueInterface
value = objValue.Index(i)
valueInterface = value.Interface()
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
}
}
return true
}
// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float64{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float64{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float64{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float64{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}

2054
vendor/github.com/stretchr/testify/assert/assertions.go generated vendored Normal file

File diff suppressed because it is too large

46
vendor/github.com/stretchr/testify/assert/doc.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// # Example Usage
//
// The following is a complete example using assert in a standard test function:
//
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// If you assert many times, use the format below:
//
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
// assert := assert.New(t)
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(a, b, "The two words should be the same.")
// }
//
// # Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
// testing framework. This allows the assertion funcs to write the failings and other details to
// the correct place.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package assert

10
vendor/github.com/stretchr/testify/assert/errors.go generated vendored Normal file
View file

@ -0,0 +1,10 @@
package assert
import (
"errors"
)
// AnError is an error instance useful for testing. If the code does not care
// about the specifics of the error and only needs some error value to return,
// this error should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")
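// A minimal usage sketch (illustrative, not part of the vendored file),
// assuming the standard testing and testify/assert imports; stubStore and
// Save are hypothetical names standing in for a test double and the code
// under test:
//
//	func TestSaveReturnsStoreError(t *testing.T) {
//	    store := &stubStore{err: assert.AnError} // stub fails with AnError
//	    err := Save(store, "key", []byte("value"))
//	    assert.ErrorIs(t, err, assert.AnError) // only propagation matters, not specifics
//	}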

View file

@ -0,0 +1,16 @@
package assert
// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
t TestingT
}
// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
return &Assertions{
t: t,
}
}
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"

View file

@ -0,0 +1,162 @@
package assert
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// httpCode is a helper that returns the HTTP code of the response. It returns -1 and
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url, nil)
if err != nil {
return -1, err
}
req.URL.RawQuery = values.Encode()
handler(w, req)
return w.Code, nil
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isSuccessCode
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isRedirectCode
}
// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
}
return isErrorCode
}
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
code, err := httpCode(handler, method, url, values)
if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
}
successful := code == statuscode
if !successful {
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
}
return successful
}
// HTTPBody is a helper that returns the HTTP body of the response. It returns
// an empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return ""
}
handler(w, req)
return w.Body.String()
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return contains
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return !contains
}
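// A minimal sketch exercising these helpers against a concrete handler
// (illustrative, not part of the vendored file; healthHandler is a
// hypothetical handler, and the standard testing, net/http, fmt and
// testify/assert imports are assumed):
//
//	func healthHandler(w http.ResponseWriter, r *http.Request) {
//	    w.WriteHeader(http.StatusOK)
//	    fmt.Fprint(w, "ok")
//	}
//
//	func TestHealthHandler(t *testing.T) {
//	    assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
//	    assert.HTTPStatusCode(t, healthHandler, "GET", "/health", nil, http.StatusOK)
//	    assert.HTTPBodyContains(t, healthHandler, "GET", "/health", nil, "ok")
//	}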

29
vendor/github.com/stretchr/testify/require/doc.go generated vendored Normal file
View file

@ -0,0 +1,29 @@
// Package require implements the same assertions as the `assert` package but
// stops test execution when a test fails.
//
// # Example Usage
//
// The following is a complete example using require in a standard test function:
//
// import (
// "testing"
// "github.com/stretchr/testify/require"
// )
//
// func TestSomething(t *testing.T) {
//
// var a string = "Hello"
// var b string = "Hello"
//
// require.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// # Assertions
//
// The `require` package has the same global functions as the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package require

View file

@ -0,0 +1,16 @@
package require
// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
t TestingT
}
// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
return &Assertions{
t: t,
}
}
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs"

2031
vendor/github.com/stretchr/testify/require/require.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if h, ok := t.(tHelper); ok { h.Helper() }
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
t.FailNow()
}

File diff suppressed because it is too large

View file

@ -0,0 +1,5 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) {
if h, ok := a.t.(tHelper); ok { h.Helper() }
{{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}

View file

@ -0,0 +1,29 @@
package require
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Errorf(format string, args ...interface{})
FailNow()
}
type tHelper interface {
Helper()
}
// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
// for table driven tests.
type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{})
// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
// for table driven tests.
type ValueAssertionFunc func(TestingT, interface{}, ...interface{})
// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
// for table driven tests.
type BoolAssertionFunc func(TestingT, bool, ...interface{})
// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{})
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs"
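// A sketch of the table-driven pattern these function types enable
// (illustrative, not part of the vendored file); parseConfig is a
// hypothetical function under test, and the standard testing and
// testify/require imports are assumed:
//
//	func TestParseConfig(t *testing.T) {
//	    tests := []struct {
//	        name      string
//	        input     string
//	        assertion require.ErrorAssertionFunc
//	    }{
//	        {"valid version", "version: 0.1", require.NoError},
//	        {"broken yaml", "version: [", require.Error},
//	    }
//	    for _, tt := range tests {
//	        t.Run(tt.name, func(t *testing.T) {
//	            _, err := parseConfig(tt.input)
//	            tt.assertion(t, err) // NoError or Error, per table row
//	        })
//	    }
//	}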

66
vendor/github.com/stretchr/testify/suite/doc.go generated vendored Normal file
View file

@ -0,0 +1,66 @@
// Package suite contains logic for creating testing suite structs
// and running the methods on those structs as tests. The most useful
// piece of this package is that you can create setup/teardown methods
// on your testing suites, which will run before/after the whole suite
// or individual tests (depending on which interface(s) you
// implement).
//
// A testing suite is usually built by first extending the built-in
// suite functionality from suite.Suite in testify. Alternatively,
// you could reproduce that logic on your own if you wanted (you
// just need to implement the TestingSuite interface from
// suite/interfaces.go).
//
// After that, you can implement any of the interfaces in
// suite/interfaces.go to add setup/teardown functionality to your
// suite, and add any methods that start with "Test" to add tests.
// Methods that do not match any suite interfaces and do not begin
// with "Test" will not be run by testify, and can safely be used as
// helper methods.
//
// Once you've built your testing suite, you need to run the suite
// (using suite.Run from testify) inside any function that matches the
// identity that "go test" is already looking for (i.e.
// func(*testing.T)).
//
// The regular expression passed to the standard "-run" command-line argument
// selects which test suites run, and the regular expression passed to the
// "-m" argument selects which methods of those suites run.
// The Suite object also exposes assertion methods directly.
//
// A crude example:
//
// // Basic imports
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// "github.com/stretchr/testify/suite"
// )
//
// // Define the suite, and absorb the built-in basic suite
// // functionality from testify - including a T() method which
// // returns the current testing context
// type ExampleTestSuite struct {
// suite.Suite
// VariableThatShouldStartAtFive int
// }
//
// // Make sure that VariableThatShouldStartAtFive is set to five
// // before each test
// func (suite *ExampleTestSuite) SetupTest() {
// suite.VariableThatShouldStartAtFive = 5
// }
//
// // All methods that begin with "Test" are run as tests within a
// // suite.
// func (suite *ExampleTestSuite) TestExample() {
// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
// suite.Equal(5, suite.VariableThatShouldStartAtFive)
// }
//
// // In order for 'go test' to run this suite, we need to create
// // a normal test function and pass our suite to suite.Run
// func TestExampleTestSuite(t *testing.T) {
// suite.Run(t, new(ExampleTestSuite))
// }
package suite

66
vendor/github.com/stretchr/testify/suite/interfaces.go generated vendored Normal file
View file

@ -0,0 +1,66 @@
package suite
import "testing"
// TestingSuite can store and return the current *testing.T context
// generated by 'go test'.
type TestingSuite interface {
T() *testing.T
SetT(*testing.T)
SetS(suite TestingSuite)
}
// SetupAllSuite has a SetupSuite method, which will run before the
// tests in the suite are run.
type SetupAllSuite interface {
SetupSuite()
}
// SetupTestSuite has a SetupTest method, which will run before each
// test in the suite.
type SetupTestSuite interface {
SetupTest()
}
// TearDownAllSuite has a TearDownSuite method, which will run after
// all the tests in the suite have been run.
type TearDownAllSuite interface {
TearDownSuite()
}
// TearDownTestSuite has a TearDownTest method, which will run after
// each test in the suite.
type TearDownTestSuite interface {
TearDownTest()
}
// BeforeTest has a function to be executed right before the test
// starts and receives the suite and test names as input
type BeforeTest interface {
BeforeTest(suiteName, testName string)
}
// AfterTest has a function to be executed right after the test
// finishes and receives the suite and test names as input
type AfterTest interface {
AfterTest(suiteName, testName string)
}
// WithStats has a HandleStats method, which will be executed when
// a test suite is finished. The stats contain information about
// the execution of that suite and its tests.
type WithStats interface {
HandleStats(suiteName string, stats *SuiteInformation)
}
// SetupSubTest has a SetupSubTest method, which will run before each
// subtest in the suite.
type SetupSubTest interface {
SetupSubTest()
}
// TearDownSubTest has a TearDownSubTest method, which will run after
// each subtest in the suite has been run.
type TearDownSubTest interface {
TearDownSubTest()
}
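// A sketch of the per-test hooks above (illustrative, not part of the
// vendored file), reusing the ExampleTestSuite from the package
// documentation; the hooks receive the suite and test names, which is handy
// for per-test logging:
//
//	func (s *ExampleTestSuite) BeforeTest(suiteName, testName string) {
//	    s.T().Logf("starting %s/%s", suiteName, testName)
//	}
//
//	func (s *ExampleTestSuite) AfterTest(suiteName, testName string) {
//	    s.T().Logf("finished %s/%s", suiteName, testName)
//	}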

46
vendor/github.com/stretchr/testify/suite/stats.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
package suite
import "time"
// SuiteInformation stores stats for the whole suite execution.
type SuiteInformation struct {
Start, End time.Time
TestStats map[string]*TestInformation
}
// TestInformation stores information about the execution of each test.
type TestInformation struct {
TestName string
Start, End time.Time
Passed bool
}
func newSuiteInformation() *SuiteInformation {
testStats := make(map[string]*TestInformation)
return &SuiteInformation{
TestStats: testStats,
}
}
func (s SuiteInformation) start(testName string) {
s.TestStats[testName] = &TestInformation{
TestName: testName,
Start: time.Now(),
}
}
func (s SuiteInformation) end(testName string, passed bool) {
s.TestStats[testName].End = time.Now()
s.TestStats[testName].Passed = passed
}
func (s SuiteInformation) Passed() bool {
for _, stats := range s.TestStats {
if !stats.Passed {
return false
}
}
return true
}
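// A sketch of consuming these stats through the WithStats interface
// (illustrative, not part of the vendored file), reusing the
// ExampleTestSuite from the package documentation:
//
//	func (s *ExampleTestSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
//	    s.T().Logf("%s passed=%v duration=%s", suiteName, stats.Passed(), stats.End.Sub(stats.Start))
//	}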

248
vendor/github.com/stretchr/testify/suite/suite.go generated vendored Normal file
View file

@ -0,0 +1,248 @@
package suite
import (
"flag"
"fmt"
"os"
"reflect"
"regexp"
"runtime/debug"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
// Suite is a basic testing suite with methods for storing and
// retrieving the current *testing.T context.
type Suite struct {
*assert.Assertions
mu sync.RWMutex
require *require.Assertions
t *testing.T
// Parent suite to have access to the implemented methods of parent struct
s TestingSuite
}
// T retrieves the current *testing.T context.
func (suite *Suite) T() *testing.T {
suite.mu.RLock()
defer suite.mu.RUnlock()
return suite.t
}
// SetT sets the current *testing.T context.
func (suite *Suite) SetT(t *testing.T) {
suite.mu.Lock()
defer suite.mu.Unlock()
suite.t = t
suite.Assertions = assert.New(t)
suite.require = require.New(t)
}
// SetS sets the parent test suite so the embedded Suite can access methods
// implemented on the outer suite struct.
func (suite *Suite) SetS(s TestingSuite) {
suite.s = s
}
// Require returns a require context for suite.
func (suite *Suite) Require() *require.Assertions {
suite.mu.Lock()
defer suite.mu.Unlock()
if suite.require == nil {
suite.require = require.New(suite.T())
}
return suite.require
}
// Assert returns an assert context for suite. Normally, you can call
// `suite.NoError(expected, actual)`, but for situations where the embedded
// methods are overridden (for example, you might want to override
// assert.Assertions with require.Assertions), this method is provided so you
// can call `suite.Assert().NoError()`.
func (suite *Suite) Assert() *assert.Assertions {
suite.mu.Lock()
defer suite.mu.Unlock()
if suite.Assertions == nil {
suite.Assertions = assert.New(suite.T())
}
return suite.Assertions
}
func recoverAndFailOnPanic(t *testing.T) {
r := recover()
failOnPanic(t, r)
}
func failOnPanic(t *testing.T, r interface{}) {
if r != nil {
t.Errorf("test panicked: %v\n%s", r, debug.Stack())
t.FailNow()
}
}
// Run provides suite functionality around golang subtests. It should be
// called in place of t.Run(name, func(t *testing.T)) in test suite code.
// The passed-in func will be executed as a subtest with a fresh instance of t.
// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName.
func (suite *Suite) Run(name string, subtest func()) bool {
oldT := suite.T()
if setupSubTest, ok := suite.s.(SetupSubTest); ok {
setupSubTest.SetupSubTest()
}
defer func() {
suite.SetT(oldT)
if tearDownSubTest, ok := suite.s.(TearDownSubTest); ok {
tearDownSubTest.TearDownSubTest()
}
}()
return oldT.Run(name, func(t *testing.T) {
suite.SetT(t)
subtest()
})
}
// Run takes a testing suite and runs all of the tests attached
// to it.
func Run(t *testing.T, suite TestingSuite) {
defer recoverAndFailOnPanic(t)
suite.SetT(t)
suite.SetS(suite)
var suiteSetupDone bool
var stats *SuiteInformation
if _, ok := suite.(WithStats); ok {
stats = newSuiteInformation()
}
tests := []testing.InternalTest{}
methodFinder := reflect.TypeOf(suite)
suiteName := methodFinder.Elem().Name()
for i := 0; i < methodFinder.NumMethod(); i++ {
method := methodFinder.Method(i)
ok, err := methodFilter(method.Name)
if err != nil {
fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
os.Exit(1)
}
if !ok {
continue
}
if !suiteSetupDone {
if stats != nil {
stats.Start = time.Now()
}
if setupAllSuite, ok := suite.(SetupAllSuite); ok {
setupAllSuite.SetupSuite()
}
suiteSetupDone = true
}
test := testing.InternalTest{
Name: method.Name,
F: func(t *testing.T) {
parentT := suite.T()
suite.SetT(t)
defer recoverAndFailOnPanic(t)
defer func() {
r := recover()
if stats != nil {
passed := !t.Failed() && r == nil
stats.end(method.Name, passed)
}
if afterTestSuite, ok := suite.(AfterTest); ok {
afterTestSuite.AfterTest(suiteName, method.Name)
}
if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok {
tearDownTestSuite.TearDownTest()
}
suite.SetT(parentT)
failOnPanic(t, r)
}()
if setupTestSuite, ok := suite.(SetupTestSuite); ok {
setupTestSuite.SetupTest()
}
if beforeTestSuite, ok := suite.(BeforeTest); ok {
beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
}
if stats != nil {
stats.start(method.Name)
}
method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
},
}
tests = append(tests, test)
}
if suiteSetupDone {
defer func() {
if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
tearDownAllSuite.TearDownSuite()
}
if suiteWithStats, measureStats := suite.(WithStats); measureStats {
stats.End = time.Now()
suiteWithStats.HandleStats(suiteName, stats)
}
}()
}
runTests(t, tests)
}
// methodFilter reports whether a suite method should run, according to the
// regular expression specified with the -m command-line argument.
func methodFilter(name string) (bool, error) {
if ok, _ := regexp.MatchString("^Test", name); !ok {
return false, nil
}
return regexp.MatchString(*matchMethod, name)
}
func runTests(t testing.TB, tests []testing.InternalTest) {
if len(tests) == 0 {
t.Log("warning: no tests to run")
return
}
r, ok := t.(runner)
if !ok { // backwards compatibility with Go 1.6 and below
if !testing.RunTests(allTestsFilter, tests) {
t.Fail()
}
return
}
for _, test := range tests {
r.Run(test.Name, test.F)
}
}
type runner interface {
Run(name string, f func(t *testing.T)) bool
}
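// A sketch of the subtest flow implemented above (illustrative, not part of
// the vendored file), reusing the ExampleTestSuite from the package
// documentation and assuming an fmt import: Suite.Run replaces t.Run inside
// suite methods, and the SetupSubTest/TearDownSubTest hooks fire around
// every subtest:
//
//	func (s *ExampleTestSuite) SetupSubTest() {
//	    s.VariableThatShouldStartAtFive = 5
//	}
//
//	func (s *ExampleTestSuite) TestTable() {
//	    for _, n := range []int{1, 2, 3} {
//	        s.Run(fmt.Sprintf("n=%d", n), func() {
//	            s.Equal(5, s.VariableThatShouldStartAtFive)
//	        })
//	    }
//	}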

View file

@ -1,4 +0,0 @@
_*
*.swp
*.[568]
[568].out

View file

@ -1,3 +0,0 @@
language: go
go_import_path: gopkg.in/check.v1

25
vendor/gopkg.in/check.v1/LICENSE generated vendored
View file

@ -1,25 +0,0 @@
Gocheck - A rich testing framework for Go
Copyright (c) 2010-2013 Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

20
vendor/gopkg.in/check.v1/README.md generated vendored
View file

@ -1,20 +0,0 @@
Instructions
============
Install the package with:
go get gopkg.in/check.v1
Import it with:
import "gopkg.in/check.v1"
and use _check_ as the package name inside the code.
For more details, visit the project page:
* http://labix.org/gocheck
and the API documentation:
* https://gopkg.in/check.v1

2
vendor/gopkg.in/check.v1/TODO generated vendored
View file

@ -1,2 +0,0 @@
- Assert(slice, Contains, item)
- Parallel test support

187
vendor/gopkg.in/check.v1/benchmark.go generated vendored
View file

@ -1,187 +0,0 @@
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package check
import (
"fmt"
"runtime"
"time"
)
var memStats runtime.MemStats
// testingB is a type passed to Benchmark functions to manage benchmark
// timing and to specify the number of iterations to run.
type timer struct {
start time.Time // Time test or benchmark started
duration time.Duration
N int
bytes int64
timerOn bool
benchTime time.Duration
// The initial states of memStats.Mallocs and memStats.TotalAlloc.
startAllocs uint64
startBytes uint64
// The net total of this test after being run.
netAllocs uint64
netBytes uint64
}
// StartTimer starts timing a test. This function is called automatically
// before a benchmark starts, but it can also used to resume timing after
// a call to StopTimer.
func (c *C) StartTimer() {
if !c.timerOn {
c.start = time.Now()
c.timerOn = true
runtime.ReadMemStats(&memStats)
c.startAllocs = memStats.Mallocs
c.startBytes = memStats.TotalAlloc
}
}
// StopTimer stops timing a test. This can be used to pause the timer
// while performing complex initialization that you don't
// want to measure.
func (c *C) StopTimer() {
if c.timerOn {
c.duration += time.Now().Sub(c.start)
c.timerOn = false
runtime.ReadMemStats(&memStats)
c.netAllocs += memStats.Mallocs - c.startAllocs
c.netBytes += memStats.TotalAlloc - c.startBytes
}
}
// ResetTimer sets the elapsed benchmark time to zero.
// It does not affect whether the timer is running.
func (c *C) ResetTimer() {
if c.timerOn {
c.start = time.Now()
runtime.ReadMemStats(&memStats)
c.startAllocs = memStats.Mallocs
c.startBytes = memStats.TotalAlloc
}
c.duration = 0
c.netAllocs = 0
c.netBytes = 0
}
// SetBytes informs the number of bytes that the benchmark processes
// on each iteration. If this is called in a benchmark it will also
// report MB/s.
func (c *C) SetBytes(n int64) {
c.bytes = n
}
func (c *C) nsPerOp() int64 {
if c.N <= 0 {
return 0
}
return c.duration.Nanoseconds() / int64(c.N)
}
func (c *C) mbPerSec() float64 {
if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
return 0
}
return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
}
func (c *C) timerString() string {
if c.N <= 0 {
return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
}
mbs := c.mbPerSec()
mb := ""
if mbs != 0 {
mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
}
nsop := c.nsPerOp()
ns := fmt.Sprintf("%10d ns/op", nsop)
if c.N > 0 && nsop < 100 {
// The format specifiers here make sure that
// the ones digits line up for all three possible formats.
if nsop < 10 {
ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
} else {
ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
}
}
memStats := ""
if c.benchMem {
allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
}
return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
}
func min(x, y int) int {
if x > y {
return y
}
return x
}
func max(x, y int) int {
if x < y {
return y
}
return x
}
// roundDown10 rounds a number down to the nearest power of 10.
func roundDown10(n int) int {
var tens = 0
// tens = floor(log_10(n))
for n > 10 {
n = n / 10
tens++
}
// result = 10^tens
result := 1
for i := 0; i < tens; i++ {
result *= 10
}
return result
}
// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
func roundUp(n int) int {
base := roundDown10(n)
if n < (2 * base) {
return 2 * base
}
if n < (5 * base) {
return 5 * base
}
return 10 * base
}

876
vendor/gopkg.in/check.v1/check.go generated vendored
View file

@ -1,876 +0,0 @@
// Package check is a rich testing extension for Go's testing package.
//
// For details about the project, see:
//
// http://labix.org/gocheck
//
package check
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// -----------------------------------------------------------------------
// Internal type which deals with suite method calling.
const (
fixtureKd = iota
testKd
)
type funcKind int
const (
succeededSt = iota
failedSt
skippedSt
panickedSt
fixturePanickedSt
missedSt
)
type funcStatus uint32
// A method value can't reach its own Method structure.
type methodType struct {
reflect.Value
Info reflect.Method
}
func newMethod(receiver reflect.Value, i int) *methodType {
return &methodType{receiver.Method(i), receiver.Type().Method(i)}
}
func (method *methodType) PC() uintptr {
return method.Info.Func.Pointer()
}
func (method *methodType) suiteName() string {
t := method.Info.Type.In(0)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t.Name()
}
func (method *methodType) String() string {
return method.suiteName() + "." + method.Info.Name
}
func (method *methodType) matches(re *regexp.Regexp) bool {
return (re.MatchString(method.Info.Name) ||
re.MatchString(method.suiteName()) ||
re.MatchString(method.String()))
}
type C struct {
method *methodType
kind funcKind
testName string
_status funcStatus
logb *logger
logw io.Writer
done chan *C
reason string
mustFail bool
tempDir *tempDir
benchMem bool
startTime time.Time
timer
}
func (c *C) status() funcStatus {
return funcStatus(atomic.LoadUint32((*uint32)(&c._status)))
}
func (c *C) setStatus(s funcStatus) {
atomic.StoreUint32((*uint32)(&c._status), uint32(s))
}
func (c *C) stopNow() {
runtime.Goexit()
}
// logger is a concurrency safe byte.Buffer
type logger struct {
sync.Mutex
writer bytes.Buffer
}
func (l *logger) Write(buf []byte) (int, error) {
l.Lock()
defer l.Unlock()
return l.writer.Write(buf)
}
func (l *logger) WriteTo(w io.Writer) (int64, error) {
l.Lock()
defer l.Unlock()
return l.writer.WriteTo(w)
}
func (l *logger) String() string {
l.Lock()
defer l.Unlock()
return l.writer.String()
}
// -----------------------------------------------------------------------
// Handling of temporary files and directories.
type tempDir struct {
sync.Mutex
path string
counter int
}
func (td *tempDir) newPath() string {
td.Lock()
defer td.Unlock()
if td.path == "" {
path, err := ioutil.TempDir("", "check-")
if err != nil {
panic("Couldn't create temporary directory: " + err.Error())
}
td.path = path
}
result := filepath.Join(td.path, strconv.Itoa(td.counter))
td.counter++
return result
}
func (td *tempDir) removeAll() {
td.Lock()
defer td.Unlock()
if td.path != "" {
err := os.RemoveAll(td.path)
if err != nil {
fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
}
}
}
// Create a new temporary directory which is automatically removed after
// the suite finishes running.
func (c *C) MkDir() string {
path := c.tempDir.newPath()
if err := os.Mkdir(path, 0700); err != nil {
panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
}
return path
}
// -----------------------------------------------------------------------
// Low-level logging functions.
func (c *C) log(args ...interface{}) {
c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
}
func (c *C) logf(format string, args ...interface{}) {
c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
}
func (c *C) logNewLine() {
c.writeLog([]byte{'\n'})
}
func (c *C) writeLog(buf []byte) {
c.logb.Write(buf)
if c.logw != nil {
c.logw.Write(buf)
}
}
func hasStringOrError(x interface{}) (ok bool) {
_, ok = x.(fmt.Stringer)
if ok {
return
}
_, ok = x.(error)
return
}
func (c *C) logValue(label string, value interface{}) {
if label == "" {
if hasStringOrError(value) {
c.logf("... %#v (%q)", value, value)
} else {
c.logf("... %#v", value)
}
} else if value == nil {
c.logf("... %s = nil", label)
} else {
if hasStringOrError(value) {
fv := fmt.Sprintf("%#v", value)
qv := fmt.Sprintf("%q", value)
if fv != qv {
c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
return
}
}
if s, ok := value.(string); ok && isMultiLine(s) {
c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
c.logMultiLine(s)
} else {
c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
}
}
}
func formatMultiLine(s string, quote bool) []byte {
b := make([]byte, 0, len(s)*2)
i := 0
n := len(s)
for i < n {
j := i + 1
for j < n && s[j-1] != '\n' {
j++
}
b = append(b, "... "...)
if quote {
b = strconv.AppendQuote(b, s[i:j])
} else {
b = append(b, s[i:j]...)
b = bytes.TrimSpace(b)
}
if quote && j < n {
b = append(b, " +"...)
}
b = append(b, '\n')
i = j
}
return b
}
func (c *C) logMultiLine(s string) {
c.writeLog(formatMultiLine(s, true))
}
func isMultiLine(s string) bool {
for i := 0; i+1 < len(s); i++ {
if s[i] == '\n' {
return true
}
}
return false
}
func (c *C) logString(issue string) {
c.log("... ", issue)
}
func (c *C) logCaller(skip int) {
// This is a bit heavier than it ought to be.
skip++ // Our own frame.
pc, callerFile, callerLine, ok := runtime.Caller(skip)
if !ok {
return
}
var testFile string
var testLine int
testFunc := runtime.FuncForPC(c.method.PC())
if runtime.FuncForPC(pc) != testFunc {
for {
skip++
if pc, file, line, ok := runtime.Caller(skip); ok {
// Note that the test line may be different on
// distinct calls for the same test. Showing
// the "internal" line is helpful when debugging.
if runtime.FuncForPC(pc) == testFunc {
testFile, testLine = file, line
break
}
} else {
break
}
}
}
if testFile != "" && (testFile != callerFile || testLine != callerLine) {
c.logCode(testFile, testLine)
}
c.logCode(callerFile, callerLine)
}
func (c *C) logCode(path string, line int) {
c.logf("%s:%d:", nicePath(path), line)
code, err := printLine(path, line)
if code == "" {
code = "..." // XXX Open the file and take the raw line.
if err != nil {
code += err.Error()
}
}
c.log(indent(code, " "))
}
var valueGo = filepath.Join("reflect", "value.go")
var asmGo = filepath.Join("runtime", "asm_")
func (c *C) logPanic(skip int, value interface{}) {
skip++ // Our own frame.
initialSkip := skip
for ; ; skip++ {
if pc, file, line, ok := runtime.Caller(skip); ok {
if skip == initialSkip {
c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
}
name := niceFuncName(pc)
path := nicePath(file)
if strings.Contains(path, "/gopkg.in/check.v") {
continue
}
if name == "Value.call" && strings.HasSuffix(path, valueGo) {
continue
}
if (name == "call16" || name == "call32") && strings.Contains(path, asmGo) {
continue
}
c.logf("%s:%d\n in %s", nicePath(file), line, name)
} else {
break
}
}
}
func (c *C) logSoftPanic(issue string) {
c.log("... Panic: ", issue)
}
func (c *C) logArgPanic(method *methodType, expectedType string) {
c.logf("... Panic: %s argument should be %s",
niceFuncName(method.PC()), expectedType)
}
// -----------------------------------------------------------------------
// Some simple formatting helpers.
var initWD, initWDErr = os.Getwd()
func init() {
if initWDErr == nil {
initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
}
}
func nicePath(path string) string {
if initWDErr == nil {
if strings.HasPrefix(path, initWD) {
return path[len(initWD):]
}
}
return path
}
func niceFuncPath(pc uintptr) string {
function := runtime.FuncForPC(pc)
if function != nil {
filename, line := function.FileLine(pc)
return fmt.Sprintf("%s:%d", nicePath(filename), line)
}
return "<unknown path>"
}
func niceFuncName(pc uintptr) string {
function := runtime.FuncForPC(pc)
if function != nil {
name := path.Base(function.Name())
if i := strings.Index(name, "."); i > 0 {
name = name[i+1:]
}
if strings.HasPrefix(name, "(*") {
if i := strings.Index(name, ")"); i > 0 {
name = name[2:i] + name[i+1:]
}
}
if i := strings.LastIndex(name, ".*"); i != -1 {
name = name[:i] + "." + name[i+2:]
}
if i := strings.LastIndex(name, "·"); i != -1 {
name = name[:i] + "." + name[i+2:]
}
return name
}
return "<unknown function>"
}
// -----------------------------------------------------------------------
// Result tracker to aggregate call results.
type Result struct {
Succeeded int
Failed int
Skipped int
Panicked int
FixturePanicked int
ExpectedFailures int
Missed int // Not even tried to run, related to a panic in the fixture.
RunError error // Houston, we've got a problem.
WorkDir string // If KeepWorkDir is true
}
type resultTracker struct {
result Result
_lastWasProblem bool
_waiting int
_missed int
_expectChan chan *C
_doneChan chan *C
_stopChan chan bool
}
func newResultTracker() *resultTracker {
return &resultTracker{_expectChan: make(chan *C), // Synchronous
_doneChan: make(chan *C, 32), // Asynchronous
_stopChan: make(chan bool)} // Synchronous
}
func (tracker *resultTracker) start() {
go tracker._loopRoutine()
}
func (tracker *resultTracker) waitAndStop() {
<-tracker._stopChan
}
func (tracker *resultTracker) expectCall(c *C) {
tracker._expectChan <- c
}
func (tracker *resultTracker) callDone(c *C) {
tracker._doneChan <- c
}
func (tracker *resultTracker) _loopRoutine() {
for {
var c *C
if tracker._waiting > 0 {
// Calls still running. Can't stop.
select {
// XXX Reindent this (not now to make diff clear)
case <-tracker._expectChan:
tracker._waiting++
case c = <-tracker._doneChan:
tracker._waiting--
switch c.status() {
case succeededSt:
if c.kind == testKd {
if c.mustFail {
tracker.result.ExpectedFailures++
} else {
tracker.result.Succeeded++
}
}
case failedSt:
tracker.result.Failed++
case panickedSt:
if c.kind == fixtureKd {
tracker.result.FixturePanicked++
} else {
tracker.result.Panicked++
}
case fixturePanickedSt:
// Track it as missed, since the panic
// was on the fixture, not on the test.
tracker.result.Missed++
case missedSt:
tracker.result.Missed++
case skippedSt:
if c.kind == testKd {
tracker.result.Skipped++
}
}
}
} else {
// No calls. Can stop, but no done calls here.
select {
case tracker._stopChan <- true:
return
case <-tracker._expectChan:
tracker._waiting++
case <-tracker._doneChan:
panic("Tracker got an unexpected done call.")
}
}
}
}
// -----------------------------------------------------------------------
// The underlying suite runner.
type suiteRunner struct {
suite interface{}
setUpSuite, tearDownSuite *methodType
setUpTest, tearDownTest *methodType
tests []*methodType
tracker *resultTracker
tempDir *tempDir
keepDir bool
output *outputWriter
reportedProblemLast bool
benchTime time.Duration
benchMem bool
}
type RunConf struct {
Output io.Writer
Stream bool
Verbose bool
Filter string
Benchmark bool
BenchmarkTime time.Duration // Defaults to 1 second
BenchmarkMem bool
KeepWorkDir bool
}
// Create a new suiteRunner able to run all methods in the given suite.
func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
var conf RunConf
if runConf != nil {
conf = *runConf
}
if conf.Output == nil {
conf.Output = os.Stdout
}
if conf.Benchmark {
conf.Verbose = true
}
suiteType := reflect.TypeOf(suite)
suiteNumMethods := suiteType.NumMethod()
suiteValue := reflect.ValueOf(suite)
runner := &suiteRunner{
suite: suite,
output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
tracker: newResultTracker(),
benchTime: conf.BenchmarkTime,
benchMem: conf.BenchmarkMem,
tempDir: &tempDir{},
keepDir: conf.KeepWorkDir,
tests: make([]*methodType, 0, suiteNumMethods),
}
if runner.benchTime == 0 {
runner.benchTime = 1 * time.Second
}
var filterRegexp *regexp.Regexp
if conf.Filter != "" {
regexp, err := regexp.Compile(conf.Filter)
if err != nil {
msg := "Bad filter expression: " + err.Error()
runner.tracker.result.RunError = errors.New(msg)
return runner
}
filterRegexp = regexp
}
for i := 0; i != suiteNumMethods; i++ {
method := newMethod(suiteValue, i)
switch method.Info.Name {
case "SetUpSuite":
runner.setUpSuite = method
case "TearDownSuite":
runner.tearDownSuite = method
case "SetUpTest":
runner.setUpTest = method
case "TearDownTest":
runner.tearDownTest = method
default:
prefix := "Test"
if conf.Benchmark {
prefix = "Benchmark"
}
if !strings.HasPrefix(method.Info.Name, prefix) {
continue
}
if filterRegexp == nil || method.matches(filterRegexp) {
runner.tests = append(runner.tests, method)
}
}
}
return runner
}
// Run all methods in the given suite.
func (runner *suiteRunner) run() *Result {
if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
runner.tracker.start()
if runner.checkFixtureArgs() {
c := runner.runFixture(runner.setUpSuite, "", nil)
if c == nil || c.status() == succeededSt {
for i := 0; i != len(runner.tests); i++ {
c := runner.runTest(runner.tests[i])
if c.status() == fixturePanickedSt {
runner.skipTests(missedSt, runner.tests[i+1:])
break
}
}
} else if c != nil && c.status() == skippedSt {
runner.skipTests(skippedSt, runner.tests)
} else {
runner.skipTests(missedSt, runner.tests)
}
runner.runFixture(runner.tearDownSuite, "", nil)
} else {
runner.skipTests(missedSt, runner.tests)
}
runner.tracker.waitAndStop()
if runner.keepDir {
runner.tracker.result.WorkDir = runner.tempDir.path
} else {
runner.tempDir.removeAll()
}
}
return &runner.tracker.result
}
// Create a call object with the given suite method, and fork a
// goroutine with the provided dispatcher for running it.
func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
var logw io.Writer
if runner.output.Stream {
logw = runner.output
}
if logb == nil {
logb = new(logger)
}
c := &C{
method: method,
kind: kind,
testName: testName,
logb: logb,
logw: logw,
tempDir: runner.tempDir,
done: make(chan *C, 1),
timer: timer{benchTime: runner.benchTime},
startTime: time.Now(),
benchMem: runner.benchMem,
}
runner.tracker.expectCall(c)
go (func() {
runner.reportCallStarted(c)
defer runner.callDone(c)
dispatcher(c)
})()
return c
}
// Same as forkCall(), but wait for call to finish before returning.
func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
c := runner.forkCall(method, kind, testName, logb, dispatcher)
<-c.done
return c
}
// Handle a finished call. If there were any panics, update the call status
// accordingly. Then, mark the call as done and report to the tracker.
func (runner *suiteRunner) callDone(c *C) {
value := recover()
if value != nil {
switch v := value.(type) {
case *fixturePanic:
if v.status == skippedSt {
c.setStatus(skippedSt)
} else {
c.logSoftPanic("Fixture has panicked (see related PANIC)")
c.setStatus(fixturePanickedSt)
}
default:
c.logPanic(1, value)
c.setStatus(panickedSt)
}
}
if c.mustFail {
switch c.status() {
case failedSt:
c.setStatus(succeededSt)
case succeededSt:
c.setStatus(failedSt)
c.logString("Error: Test succeeded, but was expected to fail")
c.logString("Reason: " + c.reason)
}
}
runner.reportCallDone(c)
c.done <- c
}
// Runs a fixture call synchronously. The fixture will still be run in a
// goroutine like all suite methods, but this method will not return
// while the fixture goroutine is not done, because the fixture must be
// run in a desired order.
func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
if method != nil {
c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
c.ResetTimer()
c.StartTimer()
defer c.StopTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
})
return c
}
return nil
}
// Run the fixture method with runFixture(), but panic with a fixturePanic{}
// in case the fixture method panics. This makes it easier to track the
// fixture panic together with other call panics within forkTest().
func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
if skipped != nil && *skipped {
return nil
}
c := runner.runFixture(method, testName, logb)
if c != nil && c.status() != succeededSt {
if skipped != nil {
*skipped = c.status() == skippedSt
}
panic(&fixturePanic{c.status(), method})
}
return c
}
type fixturePanic struct {
status funcStatus
method *methodType
}
// Run the suite test method, together with the test-specific fixture,
// asynchronously.
func (runner *suiteRunner) forkTest(method *methodType) *C {
testName := method.String()
return runner.forkCall(method, testKd, testName, nil, func(c *C) {
var skipped bool
defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
defer c.StopTimer()
benchN := 1
for {
runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
mt := c.method.Type()
if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
// Rather than a plain panic, provide a more helpful message when
// the argument type is incorrect.
c.setStatus(panickedSt)
c.logArgPanic(c.method, "*check.C")
return
}
if strings.HasPrefix(c.method.Info.Name, "Test") {
c.ResetTimer()
c.StartTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
return
}
if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
panic("unexpected method prefix: " + c.method.Info.Name)
}
runtime.GC()
c.N = benchN
c.ResetTimer()
c.StartTimer()
c.method.Call([]reflect.Value{reflect.ValueOf(c)})
c.StopTimer()
if c.status() != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
return
}
perOpN := int(1e9)
if c.nsPerOp() != 0 {
perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
}
// Logic taken from the stock testing package:
// - Run more iterations than we think we'll need for a second (1.5x).
// - Don't grow too fast in case we had timing errors previously.
// - Be sure to run at least one more than last time.
benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
benchN = roundUp(benchN)
skipped = true // Don't run the deferred one if this panics.
runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
skipped = false
}
})
}
// Same as forkTest(), but wait for the test to finish before returning.
func (runner *suiteRunner) runTest(method *methodType) *C {
c := runner.forkTest(method)
<-c.done
return c
}
// Helper to mark tests as skipped or missed. A bit heavy for what
// it does, but it enables homogeneous handling of tracking, including
// nice verbose output.
func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
for _, method := range methods {
runner.runFunc(method, testKd, "", nil, func(c *C) {
c.setStatus(status)
})
}
}
// Verify if the fixture arguments are *check.C. In case of errors,
// log the error as a panic in the fixture method call, and return false.
func (runner *suiteRunner) checkFixtureArgs() bool {
succeeded := true
argType := reflect.TypeOf(&C{})
for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
if method != nil {
mt := method.Type()
if mt.NumIn() != 1 || mt.In(0) != argType {
succeeded = false
runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
c.logArgPanic(method, "*check.C")
c.setStatus(panickedSt)
})
}
}
}
return succeeded
}
func (runner *suiteRunner) reportCallStarted(c *C) {
runner.output.WriteCallStarted("START", c)
}
func (runner *suiteRunner) reportCallDone(c *C) {
runner.tracker.callDone(c)
switch c.status() {
case succeededSt:
if c.mustFail {
runner.output.WriteCallSuccess("FAIL EXPECTED", c)
} else {
runner.output.WriteCallSuccess("PASS", c)
}
case skippedSt:
runner.output.WriteCallSuccess("SKIP", c)
case failedSt:
runner.output.WriteCallProblem("FAIL", c)
case panickedSt:
runner.output.WriteCallProblem("PANIC", c)
case fixturePanickedSt:
// That's a testKd call reporting that its fixture
// has panicked. The fixture call which caused the
// panic itself was tracked above. We'll report to
// aid debugging.
runner.output.WriteCallProblem("PANIC", c)
case missedSt:
runner.output.WriteCallSuccess("MISS", c)
}
}

528
vendor/gopkg.in/check.v1/checkers.go generated vendored
View file

@ -1,528 +0,0 @@
package check
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/kr/pretty"
)
// -----------------------------------------------------------------------
// CommentInterface and Commentf helper, to attach extra information to checks.
type comment struct {
format string
args []interface{}
}
// Commentf returns an informational value to use with Assert or Check calls.
// If the checker test fails, the provided arguments will be passed to
// fmt.Sprintf, and will be presented next to the logged failure.
//
// For example:
//
// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
//
// Note that if the comment is constant, a better option is to
// simply use a normal comment right above or next to the line, as
// it will also get printed with any errors:
//
// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
//
func Commentf(format string, args ...interface{}) CommentInterface {
return &comment{format, args}
}
// CommentInterface must be implemented by types that attach extra
// information to failed checks. See the Commentf function for details.
type CommentInterface interface {
CheckCommentString() string
}
func (c *comment) CheckCommentString() string {
return fmt.Sprintf(c.format, c.args...)
}
// -----------------------------------------------------------------------
// The Checker interface.
// The Checker interface must be provided by checkers used with
// the Assert and Check verification methods.
type Checker interface {
Info() *CheckerInfo
Check(params []interface{}, names []string) (result bool, error string)
}
// See the Checker interface.
type CheckerInfo struct {
Name string
Params []string
}
func (info *CheckerInfo) Info() *CheckerInfo {
return info
}
// -----------------------------------------------------------------------
// Not checker logic inverter.
// The Not checker inverts the logic of the provided checker. The
// resulting checker will succeed where the original one failed, and
// vice-versa.
//
// For example:
//
// c.Assert(a, Not(Equals), b)
//
func Not(checker Checker) Checker {
return &notChecker{checker}
}
type notChecker struct {
sub Checker
}
func (checker *notChecker) Info() *CheckerInfo {
info := *checker.sub.Info()
info.Name = "Not(" + info.Name + ")"
return &info
}
func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
result, error = checker.sub.Check(params, names)
result = !result
if result {
// clear error message if the new result is true
error = ""
}
return
}
// -----------------------------------------------------------------------
// IsNil checker.
type isNilChecker struct {
*CheckerInfo
}
// The IsNil checker tests whether the obtained value is nil.
//
// For example:
//
// c.Assert(err, IsNil)
//
var IsNil Checker = &isNilChecker{
&CheckerInfo{Name: "IsNil", Params: []string{"value"}},
}
func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return isNil(params[0]), ""
}
func isNil(obtained interface{}) (result bool) {
if obtained == nil {
result = true
} else {
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
}
return
}
// -----------------------------------------------------------------------
// NotNil checker. Alias for Not(IsNil), since it's so common.
type notNilChecker struct {
*CheckerInfo
}
// The NotNil checker verifies that the obtained value is not nil.
//
// For example:
//
// c.Assert(iface, NotNil)
//
// This is an alias for Not(IsNil), made available since it's a
// fairly common check.
//
var NotNil Checker = &notNilChecker{
&CheckerInfo{Name: "NotNil", Params: []string{"value"}},
}
func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return !isNil(params[0]), ""
}
// -----------------------------------------------------------------------
// Equals checker.
func diffworthy(a interface{}) bool {
if a == nil {
return false
}
t := reflect.TypeOf(a)
switch t.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct, reflect.String, reflect.Ptr:
return true
}
return false
}
// formatUnequal will dump the actual and expected values into a textual
// representation and return an error message containing a diff.
func formatUnequal(obtained interface{}, expected interface{}) string {
// We do not do diffs for basic types because go-check already
// shows them very cleanly.
if !diffworthy(obtained) || !diffworthy(expected) {
return ""
}
// Handle strings, short strings are ignored (go-check formats
// them very nicely already). We do multi-line strings by
// generating two string slices and using kr.Diff to compare
// those (kr.Diff does not do string diffs by itself).
aStr, aOK := obtained.(string)
bStr, bOK := expected.(string)
if aOK && bOK {
l1 := strings.Split(aStr, "\n")
l2 := strings.Split(bStr, "\n")
// the "2" here is a bit arbitrary
if len(l1) > 2 && len(l2) > 2 {
diff := pretty.Diff(l1, l2)
return fmt.Sprintf(`String difference:
%s`, formatMultiLine(strings.Join(diff, "\n"), false))
}
// string too short
return ""
}
// generic diff
diff := pretty.Diff(obtained, expected)
if len(diff) == 0 {
// No diff, this happens when e.g. just struct
// pointers are different but the structs have
// identical values.
return ""
}
return fmt.Sprintf(`Difference:
%s`, formatMultiLine(strings.Join(diff, "\n"), false))
}
type equalsChecker struct {
*CheckerInfo
}
// The Equals checker verifies that the obtained value is equal to
// the expected value, according to usual Go semantics for ==.
//
// For example:
//
// c.Assert(value, Equals, 42)
//
var Equals Checker = &equalsChecker{
&CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
}
func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
defer func() {
if v := recover(); v != nil {
result = false
error = fmt.Sprint(v)
}
}()
result = params[0] == params[1]
if !result {
error = formatUnequal(params[0], params[1])
}
return
}
// -----------------------------------------------------------------------
// DeepEquals checker.
type deepEqualsChecker struct {
*CheckerInfo
}
// The DeepEquals checker verifies that the obtained value is deep-equal to
// the expected value. The check will work correctly even when facing
// slices, interfaces, and values of different types (which always fail
// the test).
//
// For example:
//
// c.Assert(value, DeepEquals, 42)
// c.Assert(array, DeepEquals, []string{"hi", "there"})
//
var DeepEquals Checker = &deepEqualsChecker{
&CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
}
func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
result = reflect.DeepEqual(params[0], params[1])
if !result {
error = formatUnequal(params[0], params[1])
}
return
}
// -----------------------------------------------------------------------
// HasLen checker.
type hasLenChecker struct {
*CheckerInfo
}
// The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals
// in conjunction with the len function because in case the check
// fails the value itself will be printed, instead of its length,
// providing more details for figuring out the problem.
//
// For example:
//
// c.Assert(list, HasLen, 5)
//
var HasLen Checker = &hasLenChecker{
&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
}
func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
n, ok := params[1].(int)
if !ok {
return false, "n must be an int"
}
value := reflect.ValueOf(params[0])
switch value.Kind() {
case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
default:
return false, "obtained value type has no length"
}
return value.Len() == n, ""
}
// -----------------------------------------------------------------------
// ErrorMatches checker.
type errorMatchesChecker struct {
*CheckerInfo
}
// The ErrorMatches checker verifies that the error value
// is non-nil and matches the regular expression provided.
//
// For example:
//
// c.Assert(err, ErrorMatches, "perm.*denied")
//
var ErrorMatches Checker = errorMatchesChecker{
&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
}
func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
if params[0] == nil {
return false, "Error value is nil"
}
err, ok := params[0].(error)
if !ok {
return false, "Value is not an error"
}
params[0] = err.Error()
names[0] = "error"
return matches(params[0], params[1])
}
// -----------------------------------------------------------------------
// Matches checker.
type matchesChecker struct {
*CheckerInfo
}
// The Matches checker verifies that the string provided as the obtained
// value (or the string resulting from obtained.String()) matches the
// regular expression provided.
//
// For example:
//
// c.Assert(err, Matches, "perm.*denied")
//
var Matches Checker = &matchesChecker{
&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
}
func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
return matches(params[0], params[1])
}
func matches(value, regex interface{}) (result bool, error string) {
reStr, ok := regex.(string)
if !ok {
return false, "Regex must be a string"
}
valueStr, valueIsStr := value.(string)
if !valueIsStr {
if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
valueStr, valueIsStr = valueWithStr.String(), true
}
}
if valueIsStr {
matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
if err != nil {
return false, "Can't compile regex: " + err.Error()
}
return matches, ""
}
return false, "Obtained value is not a string and has no .String()"
}
// -----------------------------------------------------------------------
// Panics checker.
type panicsChecker struct {
*CheckerInfo
}
// The Panics checker verifies that calling the provided zero-argument
// function will cause a panic which is deep-equal to the provided value.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
//
//
var Panics Checker = &panicsChecker{
&CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
}
func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if error != "" {
return
}
params[0] = recover()
names[0] = "panic"
result = reflect.DeepEqual(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
type panicMatchesChecker struct {
*CheckerInfo
}
// The PanicMatches checker verifies that calling the provided zero-argument
// function will cause a panic with an error value matching
// the regular expression provided.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
//
//
var PanicMatches Checker = &panicMatchesChecker{
&CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
}
func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if errmsg != "" {
return
}
obtained := recover()
names[0] = "panic"
if e, ok := obtained.(error); ok {
params[0] = e.Error()
} else if _, ok := obtained.(string); ok {
params[0] = obtained
} else {
errmsg = "Panic value is not a string or an error"
return
}
result, errmsg = matches(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
// -----------------------------------------------------------------------
// FitsTypeOf checker.
type fitsTypeChecker struct {
*CheckerInfo
}
// The FitsTypeOf checker verifies that the obtained value is
// assignable to a variable with the same type as the provided
// sample value.
//
// For example:
//
// c.Assert(value, FitsTypeOf, int64(0))
// c.Assert(value, FitsTypeOf, os.Error(nil))
//
var FitsTypeOf Checker = &fitsTypeChecker{
&CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
}
func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
sample := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !sample.IsValid() {
return false, "Invalid sample value"
}
return obtained.Type().AssignableTo(sample.Type()), ""
}
// -----------------------------------------------------------------------
// Implements checker.
type implementsChecker struct {
*CheckerInfo
}
// The Implements checker verifies that the obtained value
// implements the interface specified via a pointer to an interface
// variable.
//
// For example:
//
// var e os.Error
// c.Assert(err, Implements, &e)
//
var Implements Checker = &implementsChecker{
&CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
}
func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
ifaceptr := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
return false, "ifaceptr should be a pointer to an interface variable"
}
return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
}
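As a rough illustration of the Checker interface described above, a minimal custom checker could look like the following; the IsPositive name and logic are hypothetical and not part of check.v1:

```Go
// isPositiveChecker is a hypothetical checker verifying that an int is > 0.
type isPositiveChecker struct {
	*CheckerInfo
}

var IsPositive Checker = &isPositiveChecker{
	&CheckerInfo{Name: "IsPositive", Params: []string{"obtained"}},
}

func (checker *isPositiveChecker) Check(params []interface{}, names []string) (result bool, error string) {
	n, ok := params[0].(int)
	if !ok {
		return false, "obtained value is not an int"
	}
	return n > 0, ""
}
```

It would then be used like the built-in checkers, e.g. c.Assert(count, IsPositive).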

233
vendor/gopkg.in/check.v1/helpers.go generated vendored
View file

@ -1,233 +0,0 @@
package check
import (
"fmt"
"strings"
"time"
)
// TestName returns the current test name in the form "SuiteName.TestName"
func (c *C) TestName() string {
return c.testName
}
// -----------------------------------------------------------------------
// Basic succeeding/failing logic.
// Failed returns whether the currently running test has already failed.
func (c *C) Failed() bool {
return c.status() == failedSt
}
// Fail marks the currently running test as failed.
//
// Something ought to have been previously logged so the developer can tell
// what went wrong. The higher level helper functions will fail the test
// and do the logging properly.
func (c *C) Fail() {
c.setStatus(failedSt)
}
// FailNow marks the currently running test as failed and stops running it.
// Something ought to have been previously logged so the developer can tell
// what went wrong. The higher level helper functions will fail the test
// and do the logging properly.
func (c *C) FailNow() {
c.Fail()
c.stopNow()
}
// Succeed marks the currently running test as succeeded, undoing any
// previous failures.
func (c *C) Succeed() {
c.setStatus(succeededSt)
}
// SucceedNow marks the currently running test as succeeded, undoing any
// previous failures, and stops running the test.
func (c *C) SucceedNow() {
c.Succeed()
c.stopNow()
}
// ExpectFailure informs that the running test is knowingly broken for
// the provided reason. If the test does not fail, an error will be reported
// to raise attention to this fact. This method is useful to temporarily
// disable tests which cover well known problems until a better time to
// fix the problem is found, without forgetting about the fact that a
// failure still exists.
func (c *C) ExpectFailure(reason string) {
if reason == "" {
panic("Missing reason why the test is expected to fail")
}
c.mustFail = true
c.reason = reason
}
// Skip skips the running test for the provided reason. If run from within
// SetUpTest, the individual test being set up will be skipped, and if run
// from within SetUpSuite, the whole suite is skipped.
func (c *C) Skip(reason string) {
if reason == "" {
panic("Missing reason why the test is being skipped")
}
c.reason = reason
c.setStatus(skippedSt)
c.stopNow()
}
// -----------------------------------------------------------------------
// Basic logging.
// GetTestLog returns the current test error output.
func (c *C) GetTestLog() string {
return c.logb.String()
}
// Log logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Log(args ...interface{}) {
c.log(args...)
}
// Logf logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Logf(format string, args ...interface{}) {
c.logf(format, args...)
}
// Output enables *C to be used as a logger in functions that require only
// the minimum interface of *log.Logger.
func (c *C) Output(calldepth int, s string) error {
d := time.Now().Sub(c.startTime)
msec := d / time.Millisecond
sec := d / time.Second
min := d / time.Minute
c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
return nil
}
// Error logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Error(args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
c.logNewLine()
c.Fail()
}
// Errorf logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Errorf(format string, args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprintf("Error: "+format, args...))
c.logNewLine()
c.Fail()
}
// Fatal logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprint.
func (c *C) Fatal(args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
c.logNewLine()
c.FailNow()
}
// Fatalf logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprintf.
func (c *C) Fatalf(format string, args ...interface{}) {
c.logCaller(1)
c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
c.logNewLine()
c.FailNow()
}
// -----------------------------------------------------------------------
// Generic checks and assertions based on checkers.
// Check verifies if the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution continues.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// If the last value in args implements CommentInterface, it is used to log
// additional information instead of being passed to the checker (see Commentf
// for an example).
func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
return c.internalCheck("Check", obtained, checker, args...)
}
// Assert ensures that the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution stops.
//
// Some checkers may not need the expected argument (e.g. IsNil).
//
// If the last value in args implements CommentInterface, it is used to log
// additional information instead of being passed to the checker (see Commentf
// for an example).
func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
if !c.internalCheck("Assert", obtained, checker, args...) {
c.stopNow()
}
}
func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
if checker == nil {
c.logCaller(2)
c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
c.logString("Oops.. you've provided a nil checker!")
c.logNewLine()
c.Fail()
return false
}
// If the last argument is a bug info, extract it out.
var comment CommentInterface
if len(args) > 0 {
if c, ok := args[len(args)-1].(CommentInterface); ok {
comment = c
args = args[:len(args)-1]
}
}
params := append([]interface{}{obtained}, args...)
info := checker.Info()
if len(params) != len(info.Params) {
names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
c.logCaller(2)
c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
c.logNewLine()
c.Fail()
return false
}
// Copy since it may be mutated by Check.
names := append([]string{}, info.Params...)
// Do the actual check.
result, error := checker.Check(params, names)
if !result || error != "" {
c.logCaller(2)
for i := 0; i != len(params); i++ {
c.logValue(names[i], params[i])
}
if comment != nil {
c.logString(comment.CheckCommentString())
}
if error != "" {
c.logString(error)
}
c.logNewLine()
c.Fail()
return false
}
return true
}
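To make the Check/Assert distinction above concrete, a short usage sketch (test body only; the suite and values are illustrative):

```Go
func (s *MySuite) TestExample(c *C) {
	// Check logs a failure and lets the test continue...
	c.Check(len("abc"), Equals, 3, Commentf("length of %q", "abc"))
	// ...while Assert stops the test immediately on a mismatch.
	c.Assert([]string{"a", "b"}, DeepEquals, []string{"a", "b"})
}
```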

168
vendor/gopkg.in/check.v1/printer.go generated vendored
View file

@ -1,168 +0,0 @@
package check
import (
"bytes"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"os"
)
func indent(s, with string) (r string) {
eol := true
for i := 0; i != len(s); i++ {
c := s[i]
switch {
case eol && c == '\n' || c == '\r':
case c == '\n' || c == '\r':
eol = true
case eol:
eol = false
s = s[:i] + with + s[i:]
i += len(with)
}
}
return s
}
func printLine(filename string, line int) (string, error) {
fset := token.NewFileSet()
file, err := os.Open(filename)
if err != nil {
return "", err
}
fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
if err != nil {
return "", err
}
config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
ast.Walk(lp, fnode)
result := lp.output.Bytes()
// Comments leave \n at the end.
n := len(result)
for n > 0 && result[n-1] == '\n' {
n--
}
return string(result[:n]), nil
}
type linePrinter struct {
config *printer.Config
fset *token.FileSet
fnode *ast.File
line int
output bytes.Buffer
stmt ast.Stmt
}
func (lp *linePrinter) emit() bool {
if lp.stmt != nil {
lp.trim(lp.stmt)
lp.printWithComments(lp.stmt)
lp.stmt = nil
return true
}
return false
}
func (lp *linePrinter) printWithComments(n ast.Node) {
nfirst := lp.fset.Position(n.Pos()).Line
nlast := lp.fset.Position(n.End()).Line
for _, g := range lp.fnode.Comments {
cfirst := lp.fset.Position(g.Pos()).Line
clast := lp.fset.Position(g.End()).Line
if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
for _, c := range g.List {
lp.output.WriteString(c.Text)
lp.output.WriteByte('\n')
}
}
if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
// The printer will not include the comment if it starts past
// the node itself. Trick it into printing by overlapping the
// slash with the end of the statement.
g.List[0].Slash = n.End() - 1
}
}
node := &printer.CommentedNode{n, lp.fnode.Comments}
lp.config.Fprint(&lp.output, lp.fset, node)
}
func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
if n == nil {
if lp.output.Len() == 0 {
lp.emit()
}
return nil
}
first := lp.fset.Position(n.Pos()).Line
last := lp.fset.Position(n.End()).Line
if first <= lp.line && last >= lp.line {
// Print the innermost statement containing the line.
if stmt, ok := n.(ast.Stmt); ok {
if _, ok := n.(*ast.BlockStmt); !ok {
lp.stmt = stmt
}
}
if first == lp.line && lp.emit() {
return nil
}
return lp
}
return nil
}
func (lp *linePrinter) trim(n ast.Node) bool {
stmt, ok := n.(ast.Stmt)
if !ok {
return true
}
line := lp.fset.Position(n.Pos()).Line
if line != lp.line {
return false
}
switch stmt := stmt.(type) {
case *ast.IfStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.SwitchStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.TypeSwitchStmt:
stmt.Body = lp.trimBlock(stmt.Body)
case *ast.CaseClause:
stmt.Body = lp.trimList(stmt.Body)
case *ast.CommClause:
stmt.Body = lp.trimList(stmt.Body)
case *ast.BlockStmt:
stmt.List = lp.trimList(stmt.List)
}
return true
}
func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
if !lp.trim(stmt) {
return lp.emptyBlock(stmt)
}
stmt.Rbrace = stmt.Lbrace
return stmt
}
func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
for i := 0; i != len(stmts); i++ {
if !lp.trim(stmts[i]) {
stmts[i] = lp.emptyStmt(stmts[i])
break
}
}
return stmts
}
func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
}
func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
p := n.Pos()
return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
}

88
vendor/gopkg.in/check.v1/reporter.go generated vendored
View file

@ -1,88 +0,0 @@
package check
import (
"fmt"
"io"
"sync"
)
// -----------------------------------------------------------------------
// Output writer manages atomic output writing according to settings.
type outputWriter struct {
m sync.Mutex
writer io.Writer
wroteCallProblemLast bool
Stream bool
Verbose bool
}
func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
}
func (ow *outputWriter) Write(content []byte) (n int, err error) {
ow.m.Lock()
n, err = ow.writer.Write(content)
ow.m.Unlock()
return
}
func (ow *outputWriter) WriteCallStarted(label string, c *C) {
if ow.Stream {
header := renderCallHeader(label, c, "", "\n")
ow.m.Lock()
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func (ow *outputWriter) WriteCallProblem(label string, c *C) {
var prefix string
if !ow.Stream {
prefix = "\n-----------------------------------" +
"-----------------------------------\n"
}
header := renderCallHeader(label, c, prefix, "\n\n")
ow.m.Lock()
ow.wroteCallProblemLast = true
ow.writer.Write([]byte(header))
if !ow.Stream {
c.logb.WriteTo(ow.writer)
}
ow.m.Unlock()
}
func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
if ow.Stream || (ow.Verbose && c.kind == testKd) {
// TODO Use a buffer here.
var suffix string
if c.reason != "" {
suffix = " (" + c.reason + ")"
}
if c.status() == succeededSt {
suffix += "\t" + c.timerString()
}
suffix += "\n"
if ow.Stream {
suffix += "\n"
}
header := renderCallHeader(label, c, "", suffix)
ow.m.Lock()
// Resist temptation of using line as prefix above due to race.
if !ow.Stream && ow.wroteCallProblemLast {
header = "\n-----------------------------------" +
"-----------------------------------\n" +
header
}
ow.wroteCallProblemLast = false
ow.writer.Write([]byte(header))
ow.m.Unlock()
}
}
func renderCallHeader(label string, c *C, prefix, suffix string) string {
pc := c.method.PC()
return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
niceFuncName(pc), suffix)
}

175
vendor/gopkg.in/check.v1/run.go generated vendored
View file

@ -1,175 +0,0 @@
package check
import (
"bufio"
"flag"
"fmt"
"os"
"testing"
"time"
)
// -----------------------------------------------------------------------
// Test suite registry.
var allSuites []interface{}
// Suite registers the given value as a test suite to be run. Any methods
// starting with the Test prefix in the given value will be considered as
// a test method.
func Suite(suite interface{}) interface{} {
allSuites = append(allSuites, suite)
return suite
}
// -----------------------------------------------------------------------
// Public running interface.
var (
oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
)
// TestingT runs all test suites registered with the Suite function,
// printing results to stdout, and reporting any failures back to
// the "testing" package.
func TestingT(testingT *testing.T) {
benchTime := *newBenchTime
if benchTime == 1*time.Second {
benchTime = *oldBenchTime
}
conf := &RunConf{
Filter: *oldFilterFlag + *newFilterFlag,
Verbose: *oldVerboseFlag || *newVerboseFlag,
Stream: *oldStreamFlag || *newStreamFlag,
Benchmark: *oldBenchFlag || *newBenchFlag,
BenchmarkTime: benchTime,
BenchmarkMem: *newBenchMem,
KeepWorkDir: *oldWorkFlag || *newWorkFlag,
}
if *oldListFlag || *newListFlag {
w := bufio.NewWriter(os.Stdout)
for _, name := range ListAll(conf) {
fmt.Fprintln(w, name)
}
w.Flush()
return
}
result := RunAll(conf)
println(result.String())
if !result.Passed() {
testingT.Fail()
}
}
// RunAll runs all test suites registered with the Suite function, using the
// provided run configuration.
func RunAll(runConf *RunConf) *Result {
result := Result{}
for _, suite := range allSuites {
result.Add(Run(suite, runConf))
}
return &result
}
// Run runs the provided test suite using the provided run configuration.
func Run(suite interface{}, runConf *RunConf) *Result {
runner := newSuiteRunner(suite, runConf)
return runner.run()
}
// ListAll returns the names of all the test functions registered with the
// Suite function that will be run with the provided run configuration.
func ListAll(runConf *RunConf) []string {
var names []string
for _, suite := range allSuites {
names = append(names, List(suite, runConf)...)
}
return names
}
// List returns the names of the test functions in the given
// suite that will be run with the provided run configuration.
func List(suite interface{}, runConf *RunConf) []string {
var names []string
runner := newSuiteRunner(suite, runConf)
for _, t := range runner.tests {
names = append(names, t.String())
}
return names
}
// -----------------------------------------------------------------------
// Result methods.
func (r *Result) Add(other *Result) {
r.Succeeded += other.Succeeded
r.Skipped += other.Skipped
r.Failed += other.Failed
r.Panicked += other.Panicked
r.FixturePanicked += other.FixturePanicked
r.ExpectedFailures += other.ExpectedFailures
r.Missed += other.Missed
if r.WorkDir != "" && other.WorkDir != "" {
r.WorkDir += ":" + other.WorkDir
} else if other.WorkDir != "" {
r.WorkDir = other.WorkDir
}
}
func (r *Result) Passed() bool {
return (r.Failed == 0 && r.Panicked == 0 &&
r.FixturePanicked == 0 && r.Missed == 0 &&
r.RunError == nil)
}
func (r *Result) String() string {
if r.RunError != nil {
return "ERROR: " + r.RunError.Error()
}
var value string
if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
r.Missed == 0 {
value = "OK: "
} else {
value = "OOPS: "
}
value += fmt.Sprintf("%d passed", r.Succeeded)
if r.Skipped != 0 {
value += fmt.Sprintf(", %d skipped", r.Skipped)
}
if r.ExpectedFailures != 0 {
value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
}
if r.Failed != 0 {
value += fmt.Sprintf(", %d FAILED", r.Failed)
}
if r.Panicked != 0 {
value += fmt.Sprintf(", %d PANICKED", r.Panicked)
}
if r.FixturePanicked != 0 {
value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
}
if r.Missed != 0 {
value += fmt.Sprintf(", %d MISSED", r.Missed)
}
if r.WorkDir != "" {
value += "\nWORK=" + r.WorkDir
}
return value
}
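A minimal sketch of the registration flow described by the doc comments above, as a consumer package would have used it (type and test names are illustrative):

```Go
package example_test

import (
	"testing"

	check "gopkg.in/check.v1"
)

// MySuite is a suite value; any of its methods starting with "Test" becomes a test.
type MySuite struct{}

// Register the suite with the package-level registry used by RunAll/TestingT.
var _ = check.Suite(&MySuite{})

func (s *MySuite) TestAddition(c *check.C) {
	c.Assert(1+1, check.Equals, 2)
}

// Bridge the registered suites into the standard "go test" runner.
func TestAll(t *testing.T) {
	check.TestingT(t)
}
```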

50
vendor/gopkg.in/yaml.v3/LICENSE generated vendored Normal file
View file

@ -0,0 +1,50 @@
This project is covered by two different licenses: MIT and Apache.
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright starting in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
vendor/gopkg.in/yaml.v3/NOTICE generated vendored Normal file
View file

@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

150
vendor/gopkg.in/yaml.v3/README.md generated vendored Normal file
View file

@ -0,0 +1,150 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.2, but preserves some behavior
from 1.1 for backwards compatibility.
Specifically, as of v3 of the yaml package:
- YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
decoded into a typed bool value. Otherwise they behave as a string. Booleans
in YAML 1.2 are _true/false_ only.
- Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
as specified in YAML 1.2, because most parsers still use the old format.
Octals in the _0o777_ format are supported though, so new files work.
- Does not support base-60 floats. These are gone from YAML 1.2, and were
actually never supported by this package as it's clearly a poor choice.
The package otherwise supports the YAML 1.2 feature set, including anchors,
tags, and map merging. Multi-document unmarshalling is not yet implemented.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v3*.
To install it, run:
go get gopkg.in/yaml.v3
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
- [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
API stability
-------------
The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the MIT and Apache License 2.0 licenses.
Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v3"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```

747
vendor/gopkg.in/yaml.v3/apic.go generated vendored Normal file
View file

@ -0,0 +1,747 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
best_width: -1,
}
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
// Create ALIAS.
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
*event = yaml_event_t{
typ: yaml_ALIAS_EVENT,
anchor: anchor,
}
return true
}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//
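
The libyaml document constructors kept above as commented-out reference (yaml_document_add_mapping, yaml_document_append_mapping_pair, and friends) have no direct counterpart in this Go port; the same document-building capability is reached through the Node type and Marshal instead. A minimal illustrative sketch, not part of this diff, assuming only the public gopkg.in/yaml.v3 API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Rough equivalent of add_mapping plus two scalar nodes and one
	// append_mapping_pair: a mapping node holding one key/value pair.
	doc := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "name"},
			{Kind: yaml.ScalarNode, Value: "registry"},
		},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: registry
}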

1000
vendor/gopkg.in/yaml.v3/decode.go generated vendored Normal file

File diff suppressed because it is too large

2020
vendor/gopkg.in/yaml.v3/emitterc.go generated vendored Normal file

File diff suppressed because it is too large

577
vendor/gopkg.in/yaml.v3/encode.go generated vendored Normal file
View file

@ -0,0 +1,577 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
indent int
doneInit bool
}
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
if e.indent == 0 {
e.indent = 4
}
e.emitter.best_indent = e.indent
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
var node *Node
if in.IsValid() {
node, _ = in.Interface().(*Node)
}
if node != nil && node.Kind == DocumentNode {
e.nodev(in)
} else {
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
tag = shortTag(tag)
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
switch value := iface.(type) {
case *Node:
e.nodev(in)
return
case Node:
if !in.CanAddr() {
var n = reflect.New(in.Type()).Elem()
n.Set(in)
in = n
}
e.nodev(in.Addr())
return
case time.Time:
e.timev(tag, in)
return
case *time.Time:
e.timev(tag, in.Elem())
return
case time.Duration:
e.stringv(tag, reflect.ValueOf(value.String()))
return
case Marshaler:
v, err := value.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
e.marshal(tag, reflect.ValueOf(v))
return
case encoding.TextMarshaler:
text, err := value.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
e.marshal(tag, in.Elem())
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice, reflect.Array:
e.slicev(tag, in)
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.intv(tag, in)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
for _, num := range index {
for {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
continue
}
break
}
v = v.Field(num)
}
return v
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = e.fieldByIndex(in, info.Inline)
if !value.IsValid() {
continue
}
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
// isOldBool returns whether s is bool notation as defined in YAML 1.1.
//
// We continue to force strings that YAML 1.1 would interpret as booleans to be
// rendered as quoted strings so that the marshalled output stays valid for
// YAML 1.1 parsing.
func isOldBool(s string) (result bool) {
switch s {
case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
"n", "N", "no", "No", "NO", "off", "Off", "OFF":
return true
default:
return false
}
}
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
if e.flow {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else {
style = yaml_LITERAL_SCALAR_STYLE
}
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
implicit := tag == ""
if !implicit {
tag = longTag(tag)
}
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.event.head_comment = head
e.event.line_comment = line
e.event.foot_comment = foot
e.event.tail_comment = tail
e.emit()
}
func (e *encoder) nodev(in reflect.Value) {
e.node(in.Interface().(*Node), "")
}
func (e *encoder) node(node *Node, tail string) {
// Zero nodes behave as nil.
if node.Kind == 0 && node.IsZero() {
e.nilv()
return
}
// If the tag was not explicitly requested, and dropping it won't change the
// implicit tag of the value, don't include it in the presentation.
var tag = node.Tag
var stag = shortTag(tag)
var forceQuoting bool
if tag != "" && node.Style&TaggedStyle == 0 {
if node.Kind == ScalarNode {
if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
tag = ""
} else {
rtag, _ := resolve("", node.Value)
if rtag == stag {
tag = ""
} else if stag == strTag {
tag = ""
forceQuoting = true
}
}
} else {
var rtag string
switch node.Kind {
case MappingNode:
rtag = mapTag
case SequenceNode:
rtag = seqTag
}
if rtag == stag {
tag = ""
}
}
}
switch node.Kind {
case DocumentNode:
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
yaml_document_end_event_initialize(&e.event, true)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case SequenceNode:
style := yaml_BLOCK_SEQUENCE_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
e.event.head_comment = []byte(node.HeadComment)
e.emit()
for _, node := range node.Content {
e.node(node, "")
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case MappingNode:
style := yaml_BLOCK_MAPPING_STYLE
if node.Style&FlowStyle != 0 {
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
e.event.tail_comment = []byte(tail)
e.event.head_comment = []byte(node.HeadComment)
e.emit()
// The tail logic below moves the foot comment of prior keys to the following key,
// since the value for each key may be a nested structure and the foot needs to be
// processed only after the entirety of the value is streamed. The last tail is processed
// with the mapping end event.
var tail string
for i := 0; i+1 < len(node.Content); i += 2 {
k := node.Content[i]
foot := k.FootComment
if foot != "" {
kopy := *k
kopy.FootComment = ""
k = &kopy
}
e.node(k, tail)
tail = foot
v := node.Content[i+1]
e.node(v, "")
}
yaml_mapping_end_event_initialize(&e.event)
e.event.tail_comment = []byte(tail)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case AliasNode:
yaml_alias_event_initialize(&e.event, []byte(node.Value))
e.event.head_comment = []byte(node.HeadComment)
e.event.line_comment = []byte(node.LineComment)
e.event.foot_comment = []byte(node.FootComment)
e.emit()
case ScalarNode:
value := node.Value
if !utf8.ValidString(value) {
if stag == binaryTag {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if stag != "" {
failf("cannot marshal invalid UTF-8 data as %s", stag)
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = binaryTag
value = encodeBase64(value)
}
style := yaml_PLAIN_SCALAR_STYLE
switch {
case node.Style&DoubleQuotedStyle != 0:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
case node.Style&SingleQuotedStyle != 0:
style = yaml_SINGLE_QUOTED_SCALAR_STYLE
case node.Style&LiteralStyle != 0:
style = yaml_LITERAL_SCALAR_STYLE
case node.Style&FoldedStyle != 0:
style = yaml_FOLDED_SCALAR_STYLE
case strings.Contains(value, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case forceQuoting:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
default:
failf("cannot encode node with unknown kind %d", node.Kind)
}
}
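
The quoting rules in stringv and node above are easiest to see from marshalled output: strings that YAML 1.1 would read as booleans or base-60 floats come out double-quoted, plain-safe strings stay unquoted, and multi-line strings fall back to literal block style. A short illustrative sketch, not part of this diff, using only the public Marshal function (the exact output assumes the encoder's default 4-space indent):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	m := map[string]string{
		"a": "yes",          // YAML 1.1 bool notation, so it gets quoted
		"b": "1:30",         // YAML 1.1 base-60 float notation, also quoted
		"c": "plain",        // already resolves to !!str, emitted plain
		"d": "line1\nline2", // contains a newline, emitted as a literal block
	}
	out, _ := yaml.Marshal(m)
	fmt.Print(string(out))
	// a: "yes"
	// b: "1:30"
	// c: plain
	// d: |-
	//     line1
	//     line2
}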

1258
vendor/gopkg.in/yaml.v3/parserc.go generated vendored Normal file

File diff suppressed because it is too large

434
vendor/gopkg.in/yaml.v3/readerc.go generated vendored Normal file
View file

@ -0,0 +1,434 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"io"
)
// Set the reader error and return 0.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// for that to be the case, and there are tests
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is Go) panicking; or C) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
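
A practical consequence of the reader above is that UTF-16 input is converted to UTF-8 transparently based on the byte order mark, so callers never handle the encoding themselves. A small illustrative sketch, not part of this diff, feeding a UTF-16LE document to the public Unmarshal function:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// "a: 1" encoded as UTF-16LE, preceded by the 0xFF 0xFE byte order mark.
	in := []byte{0xff, 0xfe, 'a', 0, ':', 0, ' ', 0, '1', 0}
	var out map[string]int
	if err := yaml.Unmarshal(in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["a"]) // 1
}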

326
vendor/gopkg.in/yaml.v3/resolve.go generated vendored Normal file
View file

@ -0,0 +1,326 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"time"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, boolTag, []string{"true", "True", "TRUE"}},
{false, boolTag, []string{"false", "False", "FALSE"}},
{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", mergeTag, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const (
nullTag = "!!null"
boolTag = "!!bool"
strTag = "!!str"
intTag = "!!int"
floatTag = "!!float"
timestampTag = "!!timestamp"
seqTag = "!!seq"
mapTag = "!!map"
binaryTag = "!!binary"
mergeTag = "!!merge"
)
var longTags = make(map[string]string)
var shortTags = make(map[string]string)
func init() {
for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
ltag := longTag(stag)
longTags[stag] = ltag
shortTags[ltag] = stag
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
if strings.HasPrefix(tag, longTagPrefix) {
if stag, ok := shortTags[tag]; ok {
return stag
}
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
if ltag, ok := longTags[tag]; ok {
return ltag
}
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
tag = shortTag(tag)
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, strTag, binaryTag:
return
case floatTag:
if rtag == intTag {
switch v := out.(type) {
case int64:
rtag = floatTag
out = float64(v)
return
case int:
rtag = floatTag
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != strTag && tag != binaryTag {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return floatTag, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == timestampTag {
t, ok := parseTimestamp(in)
if ok {
return timestampTag, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return intTag, uintv
}
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return floatTag, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
// Octals as introduced in version 1.2 of the spec.
// Octals from the 1.1 spec, spelled as 0777, are still
// decoded by default in v3 as well for compatibility.
// May be dropped in v4 depending on how usage evolves.
if strings.HasPrefix(plain, "0o") {
intv, err := strconv.ParseInt(plain[2:], 8, 64)
if err == nil {
if intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 8, 64)
if err == nil {
return intTag, uintv
}
} else if strings.HasPrefix(plain, "-0o") {
intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return intTag, int(intv)
} else {
return intTag, intv
}
}
}
default:
panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
}
}
return strTag, in
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
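
The resolution table above is what decides which Go type a plain, untagged scalar lands in when the target does not constrain it. A brief illustrative sketch, not part of this diff, decoding into interface{} values with the public Unmarshal function:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var v map[string]interface{}
	doc := []byte("a: true\nb: 0x1F\nc: .inf\nd: 12:30\ne: null\n")
	if err := yaml.Unmarshal(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T %T %T %v\n", v["a"], v["b"], v["c"], v["d"], v["e"])
	// bool int float64 string <nil> -- "12:30" stays a string because base-60
	// support was dropped, and null resolves to nil.
}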

3038
vendor/gopkg.in/yaml.v3/scannerc.go generated vendored Normal file

File diff suppressed because it is too large

134
vendor/gopkg.in/yaml.v3/sorter.go generated vendored Normal file
View file

@ -0,0 +1,134 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
digits := false
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
digits = unicode.IsDigit(ar[i])
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
if digits {
return al
} else {
return bl
}
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
// keyFloat returns a float value for v if it is a number/bool
// and whether it is a number/bool or not.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}
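
The comparator above gives marshalled maps a stable, human-friendly key order: digit runs inside keys compare numerically rather than lexically. A quick illustrative sketch, not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	m := map[string]int{"item10": 3, "item2": 2, "item1": 1}
	out, _ := yaml.Marshal(m)
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 3
}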

48
vendor/gopkg.in/yaml.v3/writerc.go generated vendored Normal file
View file

@ -0,0 +1,48 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
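
The flush above is what finally hands buffered bytes to the caller-supplied writer; with the streaming API it is driven by Encoder.Close. A small illustrative sketch, not part of this diff, assuming only the public NewEncoder, Encode, and Close functions:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	_ = enc.Encode(map[string]string{"name": "registry"})
	_ = enc.Close() // emits the stream end event and flushes the emitter's buffer into buf
	fmt.Print(buf.String()) // name: registry
}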

698
vendor/gopkg.in/yaml.v3/yaml.go generated vendored Normal file
View file

@ -0,0 +1,698 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"unicode/utf8"
)
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document.
type Unmarshaler interface {
UnmarshalYAML(value *Node) error
}
type obsoleteUnmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
parser *parser
knownFields bool
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// KnownFields ensures that the keys in decoded mappings
// exist as fields in the struct being decoded into.
func (dec *Decoder) KnownFields(enable bool) {
dec.knownFields = enable
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder()
d.knownFields = dec.knownFields
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Decode decodes the node and stores its data into the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (n *Node) Decode(v interface{}) (err error) {
d := newDecoder()
defer handleErr(&err)
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(n, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be excluded if IsZero returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// Encode encodes value v and stores its representation in n.
//
// See the documentation for Marshal for details about the
// conversion of Go values into YAML.
func (n *Node) Encode(v interface{}) (err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(v))
e.finish()
p := newParser(e.out)
p.textless = true
defer p.destroy()
doc := p.parse()
*n = *doc.Content[0]
return nil
}
// SetIndent changes the indentation used when encoding.
func (e *Encoder) SetIndent(spaces int) {
if spaces < 0 {
panic("yaml: cannot indent to a negative number of spaces")
}
e.encoder.indent = spaces
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
type Kind uint32
const (
DocumentNode Kind = 1 << iota
SequenceNode
MappingNode
ScalarNode
AliasNode
)
type Style uint32
const (
TaggedStyle Style = 1 << iota
DoubleQuotedStyle
SingleQuotedStyle
LiteralStyle
FoldedStyle
FlowStyle
)
// Node represents an element in the YAML document hierarchy. While documents
// are typically encoded and decoded into higher level types, such as structs
// and maps, Node is an intermediate representation that allows detailed
// control over the content being decoded or encoded.
//
// It's worth noting that although Node offers access into details such as
// line numbers, columns, and comments, the content when re-encoded will not
// have its original textual representation preserved. An effort is made to
// render the data pleasantly, and to preserve comments near the data they
// describe, though.
//
// Values that make use of the Node type interact with the yaml package in the
// same way any other type would do, by encoding and decoding yaml data
// directly or indirectly into them.
//
// For example:
//
// var person struct {
// Name string
// Address yaml.Node
// }
// err := yaml.Unmarshal(data, &person)
//
// Or by itself:
//
// var person Node
// err := yaml.Unmarshal(data, &person)
//
type Node struct {
// Kind defines whether the node is a document, a mapping, a sequence,
// a scalar value, or an alias to another node. The specific data type of
// scalar nodes may be obtained via the ShortTag and LongTag methods.
Kind Kind
// Style allows customizing the appearance of the node in the tree.
Style Style
// Tag holds the YAML tag defining the data type for the value.
// When decoding, this field will always be set to the resolved tag,
// even when it wasn't explicitly provided in the YAML content.
// When encoding, if this field is unset the value type will be
// implied from the node properties, and if it is set, it will only
// be serialized into the representation if TaggedStyle is used or
// the implicit tag diverges from the provided one.
Tag string
// Value holds the unescaped and unquoted representation of the value.
Value string
// Anchor holds the anchor name for this node, which allows aliases to point to it.
Anchor string
// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
Alias *Node
// Content holds contained nodes for documents, mappings, and sequences.
Content []*Node
// HeadComment holds any comments in the lines preceding the node and
// not separated by an empty line.
HeadComment string
// LineComment holds any comments at the end of the line where the node is in.
LineComment string
// FootComment holds any comments following the node and before empty lines.
FootComment string
// Line and Column hold the node position in the decoded YAML text.
// These fields are not respected when encoding the node.
Line int
Column int
}
// IsZero returns whether the node has all of its fields unset.
func (n *Node) IsZero() bool {
return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
}
// LongTag returns the long form of the tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) LongTag() string {
return longTag(n.ShortTag())
}
// ShortTag returns the short form of the YAML tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) ShortTag() string {
if n.indicatedString() {
return strTag
}
if n.Tag == "" || n.Tag == "!" {
switch n.Kind {
case MappingNode:
return mapTag
case SequenceNode:
return seqTag
case AliasNode:
if n.Alias != nil {
return n.Alias.ShortTag()
}
case ScalarNode:
tag, _ := resolve("", n.Value)
return tag
case 0:
// Special case to make the zero value convenient.
if n.IsZero() {
return nullTag
}
}
return ""
}
return shortTag(n.Tag)
}
func (n *Node) indicatedString() bool {
return n.Kind == ScalarNode &&
(shortTag(n.Tag) == strTag ||
(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
}
// SetString is a convenience function that sets the node to a string value
// and defines its style in a pleasant way depending on its content.
func (n *Node) SetString(s string) {
n.Kind = ScalarNode
if utf8.ValidString(s) {
n.Value = s
n.Tag = strTag
} else {
n.Value = encodeBase64(s)
n.Tag = binaryTag
}
if strings.Contains(n.Value, "\n") {
n.Style = LiteralStyle
}
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
// InlineUnmarshalers holds indexes to inlined fields that
// contain unmarshaler values.
InlineUnmarshalers [][]int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
var unmarshalerType reflect.Type
func init() {
var v Unmarshaler
unmarshalerType = reflect.ValueOf(&v).Elem().Type()
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
inlineUnmarshalers := [][]int(nil)
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct, reflect.Ptr:
ftype := field.Type
for ftype.Kind() == reflect.Ptr {
ftype = ftype.Elem()
}
if ftype.Kind() != reflect.Struct {
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
if reflect.PtrTo(ftype).Implements(unmarshalerType) {
inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
} else {
sinfo, err := getStructInfo(ftype)
if err != nil {
return nil, err
}
for _, index := range sinfo.InlineUnmarshalers {
inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
}
default:
return nil, errors.New("option ,inline may only be used on a struct or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
InlineUnmarshalers: inlineUnmarshalers,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
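For orientation, the flags parsed above correspond to struct tags like the following. This sketch is illustrative only; the type and field names are invented and not taken from the repository.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Address struct {
	City string `yaml:"city"`
}

type Person struct {
	Name    string            `yaml:"name"`
	Nick    string            `yaml:"nick,omitempty"` // omitted on encode when empty
	Tags    []string          `yaml:"tags,flow"`      // emitted in flow style: [a, b]
	Address `yaml:",inline"`  // embedded struct fields promoted to the top level
	Extra   map[string]string `yaml:",inline"` // catch-all map for keys with no matching field
}

func main() {
	p := Person{Name: "anna", Tags: []string{"a", "b"}, Address: Address{City: "Lisbon"}}
	out, err := yaml.Marshal(p)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}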
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
func isZero(v reflect.Value) bool {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
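The IsZeroer hook is what lets types with their own notion of emptiness, such as time.Time, participate in omitempty. A small sketch of the effect follows; the type and field names are hypothetical.

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

type Event struct {
	Name string    `yaml:"name"`
	At   time.Time `yaml:"at,omitempty"` // time.Time implements IsZero, so a zero time is dropped
}

func main() {
	out, err := yaml.Marshal(Event{Name: "boot"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: just "name: boot", with no "at:" key
}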

vendor/gopkg.in/yaml.v3/yamlh.go generated vendored Normal file

@ -0,0 +1,807 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
import (
"fmt"
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
yaml_TAIL_COMMENT_EVENT
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
yaml_TAIL_COMMENT_EVENT: "tail comment",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write no more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
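The comment above retains libyaml's C-style 1/0 return convention; in this Go port the handler returns a byte count and an error, with io.EOF marking end of input. As a rough illustration only — a hypothetical handler that would live inside this package, not part of the vendored file — it could simply delegate to the configured reader:

// readerBackedHandler is a sketch: it pulls the next chunk from the parser's
// io.Reader and lets an io.EOF error signal end of input.
func readerBackedHandler(parser *yaml_parser_t, buffer []byte) (int, error) {
	return parser.input_reader.Read(buffer)
}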
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
newlines int // The number of line breaks since last non-break/non-blank character
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Comments
head_comment []byte // The current head comments
line_comment []byte // The current line comments
foot_comment []byte // The current foot comments
tail_comment []byte // Foot comment that happens at the end of a block.
stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
comments []yaml_comment_t // The folded comments for all parsed tokens
comments_head int
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
type yaml_comment_t struct {
scan_mark yaml_mark_t // Position where scanning for comments started
token_mark yaml_mark_t // Position after which tokens will be associated with this comment
start_mark yaml_mark_t // Position of '#' comment mark
end_mark yaml_mark_t // Position where comment terminated
head []byte
line []byte
foot []byte
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
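Similarly, the @c 1 / @c 0 convention described above is inherited from libyaml; the Go signature reports failure through the returned error. A hypothetical handler in the same package — illustrative only, not vendored code — might forward emitted bytes to the configured io.Writer:

// writerBackedHandler is a sketch: it writes the emitter's output chunk to its
// io.Writer and propagates any write error back to the emitter.
func writerBackedHandler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}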
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
	space_above bool // Is there an empty line above?
foot_indent int // The indent used to write the foot comment above, or -1 if none.
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Comments
head_comment []byte
line_comment []byte
foot_comment []byte
tail_comment []byte
key_line_comment []byte
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}

vendor/gopkg.in/yaml.v3/yamlprivateh.go generated vendored Normal file

@ -0,0 +1,198 @@
//
// Copyright (c) 2011-2019 Canonical Ltd
// Copyright (c) 2006-2010 Kirill Simonov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return (
// is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return (
// is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return (
// is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
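width classifies a UTF-8 sequence by its leading byte: 0xxxxxxx is one byte, 110xxxxx two, 1110xxxx three, and 11110xxx four. A throwaway sketch — not vendored code, with sample characters chosen purely for illustration — demonstrates the same classification using Go's native UTF-8 string encoding:

package main

import "fmt"

func main() {
	// Each sample character occupies 1, 2, 3 and 4 bytes respectively in UTF-8,
	// matching the leading-byte masks tested in width().
	for _, s := range []string{"a", "é", "€", "😀"} {
		b := []byte(s)
		fmt.Printf("%q leading byte %#02x -> %d byte(s)\n", s, b[0], len(b))
	}
}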

vendor/modules.txt vendored

@ -163,6 +163,9 @@ github.com/coreos/go-systemd/v22/activation
 # github.com/cyphar/filepath-securejoin v0.2.4
 ## explicit; go 1.13
 github.com/cyphar/filepath-securejoin
+# github.com/davecgh/go-spew v1.1.1
+## explicit
+github.com/davecgh/go-spew/spew
 # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
 ## explicit
 github.com/dgryski/go-rendezvous
@ -273,12 +276,6 @@ github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/kr/pretty v0.3.1
-## explicit; go 1.12
-github.com/kr/pretty
-# github.com/kr/text v0.2.0
-## explicit
-github.com/kr/text
 # github.com/kylelemons/godebug v1.1.0
 ## explicit; go 1.11
 github.com/kylelemons/godebug/diff
@ -300,6 +297,9 @@ github.com/opencontainers/image-spec/specs-go/v1
 # github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
 ## explicit; go 1.14
 github.com/pkg/browser
+# github.com/pmezard/go-difflib v1.0.0
+## explicit
+github.com/pmezard/go-difflib/difflib
 # github.com/prometheus/client_golang v1.17.0
 ## explicit; go 1.19
 github.com/prometheus/client_golang/prometheus
@ -334,9 +334,6 @@ github.com/redis/go-redis/v9/internal/pool
 github.com/redis/go-redis/v9/internal/proto
 github.com/redis/go-redis/v9/internal/rand
 github.com/redis/go-redis/v9/internal/util
-# github.com/rogpeppe/go-internal v1.11.0
-## explicit; go 1.19
-github.com/rogpeppe/go-internal/fmtsort
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
@ -346,6 +343,11 @@ github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
+# github.com/stretchr/testify v1.8.4
+## explicit; go 1.20
+github.com/stretchr/testify/assert
+github.com/stretchr/testify/require
+github.com/stretchr/testify/suite
 # go.opencensus.io v0.24.0
 ## explicit; go 1.13
 go.opencensus.io
@ -669,9 +671,9 @@ google.golang.org/protobuf/types/known/fieldmaskpb
 google.golang.org/protobuf/types/known/structpb
 google.golang.org/protobuf/types/known/timestamppb
 google.golang.org/protobuf/types/known/wrapperspb
-# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
-## explicit; go 1.11
-gopkg.in/check.v1
 # gopkg.in/yaml.v2 v2.4.0
 ## explicit; go 1.15
 gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.1
+## explicit
+gopkg.in/yaml.v3