diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json deleted file mode 100644 index 8985d1ff..00000000 --- a/Godeps/Godeps.json +++ /dev/null @@ -1,458 +0,0 @@ -{ - "ImportPath": "github.com/docker/distribution", - "GoVersion": "go1.6", - "GodepVersion": "v74", - "Packages": [ - "./..." - ], - "Deps": [ - { - "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v5.0.0-beta-6-g0b5fe2a", - "Rev": "0b5fe2abe0271ba07049eacaa65922d67c319543" - }, - { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, - { - "ImportPath": "github.com/Sirupsen/logrus/formatters/logstash", - "Comment": "v0.7.3", - "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/client", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/request", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/session", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": 
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/service/s3", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", - "Comment": "v1.2.4", - "Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6" - }, - { - "ImportPath": "github.com/bugsnag/bugsnag-go", - "Comment": "v1.0.2-5-gb1d1530", - "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" - }, - { - "ImportPath": "github.com/bugsnag/bugsnag-go/errors", - "Comment": "v1.0.2-5-gb1d1530", - "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" - }, - { - "ImportPath": "github.com/bugsnag/osext", - "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" - }, - { - "ImportPath": "github.com/bugsnag/panicwrap", - "Comment": "1.0.0-2-ge2c2850", - "Rev": "e2c28503fcd0675329da73bf48b33404db873782" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/common", - "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/oss", - "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" - }, - { - "ImportPath": "github.com/denverdino/aliyungo/util", - "Rev": "afedced274aa9a7fcdd47ac97018f0f8db4e5de2" - }, - { - "ImportPath": "github.com/docker/goamz/aws", - "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" - }, - { - "ImportPath": "github.com/docker/goamz/s3", - "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" - }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" - }, - { - "ImportPath": "github.com/garyburd/redigo/internal", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/garyburd/redigo/redis", - "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" - }, - { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3" - }, - { - "ImportPath": "github.com/gorilla/context", - "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" - }, - { - "ImportPath": "github.com/gorilla/handlers", - "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" - }, - { - "ImportPath": "github.com/gorilla/mux", - "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" - }, - { - "ImportPath": "github.com/inconshreveable/mousetrap", - "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - }, - { - "ImportPath": "github.com/mitchellh/mapstructure", - "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" - }, - { - "ImportPath": "github.com/ncw/swift", - "Rev": "b964f2ca856aac39885e258ad25aec08d5f64ee6" - }, - { - "ImportPath": "github.com/ncw/swift/swifttest", - "Rev": "b964f2ca856aac39885e258ad25aec08d5f64ee6" - }, - { - "ImportPath": "github.com/spf13/cobra", - "Rev": "312092086bed4968099259622145a0c9ae280064" - }, - { - "ImportPath": "github.com/spf13/pflag", - "Rev": "5644820622454e71517561946e3d94b9f9db6842" - }, - { - "ImportPath": "github.com/stevvooe/resumable", - "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" - }, - { - "ImportPath": 
"github.com/stevvooe/resumable/sha256", - "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" - }, - { - "ImportPath": "github.com/stevvooe/resumable/sha512", - "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" - }, - { - "ImportPath": "github.com/yvasiyarov/go-metrics", - "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" - }, - { - "ImportPath": "github.com/yvasiyarov/gorelic", - "Comment": "v0.0.6-8-ga9bba5b", - "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" - }, - { - "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", - "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" - }, - { - "ImportPath": "golang.org/x/crypto/bcrypt", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/crypto/blowfish", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/crypto/ocsp", - "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" - }, - { - "ImportPath": "golang.org/x/net/context", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/net/context/ctxhttp", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/net/http2", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/net/http2/hpack", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/net/internal/timeseries", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/net/trace", - "Rev": "4876518f9e71663000c348837735820161a42df7" - }, - { - "ImportPath": "golang.org/x/oauth2", - "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" - }, - { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" - }, - { - "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" - }, - { - "ImportPath": "golang.org/x/oauth2/jws", - "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" - }, - { - "ImportPath": "golang.org/x/oauth2/jwt", - "Rev": "045497edb6234273d67dbc25da3f2ddbc4c4cacf" - }, - { - "ImportPath": "golang.org/x/time/rate", - "Rev": "a4bde12657593d5e90d0533a3e4fd95e635124cb" - }, - { - "ImportPath": "google.golang.org/api/gensupport", - "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" - }, - { - "ImportPath": "google.golang.org/api/googleapi", - "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" - }, - { - "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" - }, - { - "ImportPath": "google.golang.org/api/storage/v1", - "Rev": "9bf6e6e569ff057f75d9604a46c52928f17d2b54" - }, - { - "ImportPath": "google.golang.org/appengine", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal/app_identity", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal/base", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal/datastore", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal/log", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/appengine/internal/modules", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": 
"google.golang.org/appengine/internal/remote_api", - "Rev": "12d5545dc1cfa6047a286d5e853841b6471f4c19" - }, - { - "ImportPath": "google.golang.org/cloud", - "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" - }, - { - "ImportPath": "google.golang.org/cloud/compute/metadata", - "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" - }, - { - "ImportPath": "google.golang.org/cloud/internal", - "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" - }, - { - "ImportPath": "google.golang.org/cloud/internal/opts", - "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" - }, - { - "ImportPath": "google.golang.org/cloud/storage", - "Rev": "975617b05ea8a58727e6c1a06b6161ff4185a9f2" - }, - { - "ImportPath": "google.golang.org/grpc", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/codes", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/credentials", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/grpclog", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/internal", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/metadata", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/naming", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/peer", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "google.golang.org/grpc/transport", - "Rev": "d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994" - }, - { - "ImportPath": "gopkg.in/check.v1", - "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" - }, - { - "ImportPath": "gopkg.in/yaml.v2", - "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" - }, - { - "ImportPath": "rsc.io/letsencrypt", - "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" - }, - { - "ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme", - "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" - }, - { - "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1", - "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" - }, - { - "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher", - "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" - }, - { - "ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json", - "Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c" - } - ] -} diff --git a/Godeps/Readme b/Godeps/Readme deleted file mode 100644 index 4cdaa53d..00000000 --- a/Godeps/Readme +++ /dev/null @@ -1,5 +0,0 @@ -This directory tree is generated automatically by godep. - -Please do not edit. - -See https://github.com/tools/godep for more information. 
diff --git a/vendor.conf b/vendor.conf new file mode 100644 index 00000000..7ebddd06 --- /dev/null +++ b/vendor.conf @@ -0,0 +1,39 @@ +github.com/Azure/azure-sdk-for-go/storage 0b5fe2abe0271ba07049eacaa65922d67c319543 +github.com/Sirupsen/logrus 55eb11d21d2a31a3cc93838241d04800f52e823d +github.com/aws/aws-sdk-go 90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85 +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf/proto 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux e444e69cbd2e2e3e0749a2f3c717cec491552bbf +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6 +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/stevvooe/resumable 51ad44105773cafcbe91927f70ac68e1bf78f8b4 +github.com/xenolf/lego/acme a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time/rate a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md deleted file mode 100644 index 0ab09984..00000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Azure Storage SDK for Go - -The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. 
For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package. - -This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) \ No newline at end of file diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a0..00000000 --- a/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index 2d8c0866..00000000 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.2 - - 1.3 - - 1.4 - - tip -install: - - go get -t ./... diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index eb72bff9..00000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -# 0.7.3 - -formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index d55f9092..00000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,349 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. 
| -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(logrus.JSONFormatter) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(logrus.TextFormatter) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). 
- - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 5f14d116..00000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index 5f4991c2..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 7adca943..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 1272038a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,560 +0,0 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read 
and write functionality in Go. - -[简体中文](README_ZH.md) - -## Features - -- Load multiple data sources (`[]byte` or file) with overwrites. -- Read with recursive values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - - go get gopkg.in/ini.v1 - -## Getting Started - -### Loading from data sources - -A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources as you want. Passing other types will simply return an error. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -Or start with an empty object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### Working with sections - -To get a section, you would need to: - -```go -section, err := cfg.GetSection("section name") -``` - -As a shortcut for the default section, just give an empty string as the name: - -```go -section, err := cfg.GetSection("") -``` - -When you're pretty sure the section exists, the following code could make your life easier: - -```go -section := cfg.Section("") -``` - -What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. - -To create a new section: - -```go -err := cfg.NewSection("new section") -``` - -To get a list of sections or section names: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### Working with keys - -To get a key under a section: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -The same rule applies to key operations: - -```go -key := cfg.Section("").Key("key name") -``` - -To create a new key: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -To get a list of keys or key names: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -To get a clone hash of keys and corresponding values: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### Working with values - -To get a string value: - -```go -val := cfg.Section("").Key("key name").String() -``` - -To validate the key value on the fly: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -To get values with types: - -```go -// For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = 
cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods start with Must also accept one argument for default value -// when key not found or fail to parse value to given type. -// Except method MustString, which you have to pass a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value is three-line long? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Not a problem! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -That's cool, how about continuation lines? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -Note that single quotes around values will be stripped: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -That's all? Hmm, no. - -#### Helper methods of working with values - -To get value with given candidates: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. - -To validate value in a given range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -To auto-split value into slice: - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### Save your configuration - -Finally, it's time to save your configuration to somewhere. 
- -A typical way to save the configuration is to write it to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -Another way to save is to write to an `io.Writer`: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -## Advanced Usage - -### Recursive Values - -For all values of keys, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child Sections - -You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try again in its parent section until there is no parent section. - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -### Auto-increment Key Names - -If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section has an independent counter. - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### Map To Struct - -Want a more objective way to play with INI? Cool. - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // Things can be simpler. - err = ini.MapTo(p, "path/to/ini") - // ... - - // Just map a section? Fine. - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -Can I have a default value for a field? Absolutely. - -Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type. - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -It's really cool, but what's the point if you can't give me my file back from the struct? - -### Reflect From Struct - -Why not? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -So, what do I get?
- -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = -``` - -#### Name Mapper - -To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct fields and actual section and key names. - -There are 2 built-in name mappers: - -- `AllCapsUnderscore`: converts the name to the `ALL_CAPS_UNDERSCORE` format, then matches the section or key. -- `TitleUnderscore`: converts the name to the `title_underscore` format, then matches the section or key. - -To use them: - -```go -type Info struct { - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -The same name-mapper rules apply to the `ini.ReflectFromWithMapper` function. - -#### Other Notes On Map/Reflect - -Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome. - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## Getting Help - -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- [File An Issue](https://github.com/go-ini/ini/issues/new) - -## FAQs - -### What does the `BlockMode` field do? - -By default, the library lets you read and write values, so it needs a lock to keep your data safe. But in cases where you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%** (a short sketch of this pattern follows this README). - -### Why another INI library? - -Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I would like to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. - -Making those changes would have broken the API, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time (PS: shorter import path). - -## License - -This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
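To make the `BlockMode` note in the FAQ above concrete, here is a minimal, self-contained sketch of the read-only pattern it describes. It assumes the `gopkg.in/ini.v1` import path from the installation section; the file name `app.ini` is hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Hypothetical config file; any data source accepted by ini.Load works.
	cfg, err := ini.Load("app.ini")
	if err != nil {
		log.Fatal(err)
	}
	// The config is only read from here on, so the internal lock can be skipped.
	cfg.BlockMode = false
	fmt.Println(cfg.Section("").Key("NAME").String())
}
```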
diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md deleted file mode 100644 index 45e19edd..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md +++ /dev/null @@ -1,547 +0,0 @@ -本包提供了 Go 语言中读写 INI 文件的功能。 - -## 功能特性 - -- 支持覆盖加载多个数据源(`[]byte` 或文件) -- 支持递归读取键值 -- 支持读取父子分区 -- 支持读取自增键名 -- 支持读取多行的键值 -- 支持大量辅助方法 -- 支持在读取时直接转换为 Go 语言类型 -- 支持读取和 **写入** 分区和键的注释 -- 轻松操作分区、键值和注释 -- 在保存文件时分区和键值会保持原有的顺序 - -## 下载安装 - - go get gopkg.in/ini.v1 - -## 开始使用 - -### 从数据源加载 - -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -或者从一个空白的文件开始: - -```go -cfg := ini.Empty() -``` - -当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### 操作分区(Section) - -获取指定分区: - -```go -section, err := cfg.GetSection("section name") -``` - -如果您想要获取默认分区,则可以用空字符串代替分区名: - -```go -section, err := cfg.GetSection("") -``` - -当您非常确定某个分区是存在的,可以使用以下简便方法: - -```go -section := cfg.Section("") -``` - -如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 - -创建一个分区: - -```go -err := cfg.NewSection("new section") -``` - -获取所有分区对象或名称: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### 操作键(Key) - -获取某个分区下的键: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -和分区一样,您也可以直接获取键而忽略错误处理: - -```go -key := cfg.Section("").Key("key name") -``` - -创建一个新的键: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -获取分区下的所有键或键名: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -获取分区下的所有键值对的克隆: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### 操作键值(Value) - -获取一个类型为字符串(string)的值: - -```go -val := cfg.Section("").Key("key name").String() -``` - -获取值的同时通过自定义函数进行处理验证: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -获取其它类型的值: - -```go -// 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, -// 当键不存在或者转换失败时,则会直接返回该默认值。 -// 但是,MustString 方法必须传递一个默认值。 - -v = cfg.Seciont("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = 
cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -如果我的值有好多行怎么办? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -嗯哼?小 case! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -简直是小菜一碟! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -需要注意的是,值两侧的单引号会被自动剔除: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -这就是全部了?哈哈,当然不是。 - -#### 操作键值的辅助方法 - -获取键值时设定候选值: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 - -验证获取的值是否在指定范围内: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -自动分割键值为切片(slice): - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### 保存配置 - -终于到了这个时刻,是时候保存一下配置了。 - -比较原始的做法是输出配置到某个文件: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: - -```go -// ... 
-cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -### 高级用法 - -#### 递归读取键值 - -在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -#### 读取父子分区 - -您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### 读取自增键名 - -如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### 映射到结构 - -想要使用更加面向对象的方式玩转 INI 吗?好主意。 - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // 一切竟可以如此的简单。 - err = ini.MapTo(p, "path/to/ini") - // ... - - // 嗯哼?只需要映射一个分区吗? - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? - -### 从结构反射 - -可是,我有说不能吗? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -瞧瞧,奇迹发生了。 - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = -``` - -#### 名称映射器(Name Mapper) - -为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 - -目前有 2 款内置的映射器: - -- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 -- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 - -使用方法: - -```go -type Info struct{ - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... 
-} -``` - -使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 - -#### 映射/反射的其它说明 - -任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## 获取帮助 - -- [API 文档](https://gowalker.org/gopkg.in/ini.v1) -- [创建工单](https://github.com/go-ini/ini/issues/new) - -## 常见问题 - -### 字段 `BlockMode` 是什么? - -默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 - -### 为什么要写另一个 INI 解析库? - -许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 - -为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 1fee789a..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,1226 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -const ( - DEFAULT_SECTION = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. - _DEPTH_VALUES = 99 - - _VERSION = "1.6.0" -) - -func Version() string { - return _VERSION -} - -var ( - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Write spaces around "=" to look better. - PrettyFormat = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is a interface that returns file content. 
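-// It is implemented by sourceFile (a file path on disk) and sourceData (raw bytes in memory).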
-type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -type bytesReadCloser struct { - reader io.Reader -} - -func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { - return rc.reader.Read(p) -} - -func (rc *bytesReadCloser) Close() error { - return nil -} - -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return &bytesReadCloser{bytes.NewReader(s.data)}, nil -} - -// ____ __. -// | |/ _|____ ___.__. -// | <_/ __ < | | -// | | \ ___/\___ | -// |____|__ \___ > ____| -// \/ \/\/ - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncr bool -} - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// String returns string representation of value. -func (k *Key) String() string { - val := k.value - if strings.Index(val, "%") == -1 { - return val - } - - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. 
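-// The format string uses Go's reference time layout, e.g. "2006-01-02 15:04:05".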
-func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
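-// The default value does not have to be one of the candidates.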
-func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. -func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. 
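-// If the key is missing and the section name contains '.', the lookup falls
-// back to the parent section (e.g. "package.sub" falls back to "package").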
-func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ___________.__.__ -// \_ _____/|__| | ____ -// | __) | | | _/ __ \ -// | \ | | |_\ ___/ -// \___ / |__|____/\___ > -// \/ \/ - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - // Make sure data is safe in multiple goroutines. - lock sync.RWMutex - - // Allow combination of multiple data sources. - dataSources []dataSource - // Actual data is stored here. - sections map[string]*Section - - // To keep data in order. - sectionList []string - - NameMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - } -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. 
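-// For example (the file name and raw data are illustrative):
-//
-//	cfg, err := ini.Load("app.ini", []byte("extra = value"))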
-func Load(source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources) - return f, f.Reload() -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - sections := make([]*Section, len(f.sectionList)) - for i := range f.sectionList { - sections[i] = f.Section(f.sectionList[i]) - } - return sections -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) 
- delete(f.sections, name) - return - } - } -} - -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. - if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. - if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. - count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. - if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. 
- } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -// WriteToIndent writes file content into io.Writer with given value indention. -func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } - if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { - return 0, err - } - } - - if i > 0 { - if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return 0, err - } - } else { - // Write nothing if default section is empty. 
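-			// (The default section is written without a [header], so an empty one
-			// would only produce a stray blank separator line.)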
- if len(sec.keyList) == 0 { - continue - } - } - - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } - if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { - return 0, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncr: - kname = "-" - case strings.Contains(kname, "`") || strings.Contains(kname, `"`): - kname = `"""` + kname + `"""` - case strings.Contains(kname, `=`) || strings.Contains(kname, `:`): - kname = "`" + kname + "`" - } - - val := key.value - // In case key value contains "\n", "`" or "\"". - if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) || - strings.Contains(val, "#") { - val = `"""` + val + `"""` - } - if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil { - return 0, err - } - } - - // Put a line between sections. - if _, err = buf.WriteString(LineBreak); err != nil { - return 0, err - } - } - - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" - defer os.Remove(tmpPath) - - fw, err := os.Create(tmpPath) - if err != nil { - return err - } - - if _, err = f.WriteToIndent(fw, indent); err != nil { - fw.Close() - return err - } - fw.Close() - - // Remove old file and rename the new one. - os.Remove(filename) - return os.Rename(tmpPath, filename) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 531fcc11..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 1f980775..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - -install: go get -v -t ./... 
-script: make test diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index a828d284..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,44 +0,0 @@ - -CMD = jpgo - -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ./... - -build: - rm -f $(CMD) - go build ./... - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: - go test -v ./... - -check: - go vet ./... - @echo "golint ./..." - @lint=`golint ./...`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 187ef676..00000000 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -See http://jmespath.org for more info. diff --git a/vendor/github.com/bugsnag/bugsnag-go/.travis.yml b/vendor/github.com/bugsnag/bugsnag-go/.travis.yml deleted file mode 100644 index a77773b8..00000000 --- a/vendor/github.com/bugsnag/bugsnag-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - tip - -install: - - go get github.com/bugsnag/panicwrap - - go get github.com/bugsnag/osext - - go get github.com/bitly/go-simplejson - - go get github.com/revel/revel diff --git a/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt b/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt deleted file mode 100644 index 3cb0ec0f..00000000 --- a/vendor/github.com/bugsnag/bugsnag-go/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2014 Bugsnag - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/bugsnag/bugsnag-go/README.md b/vendor/github.com/bugsnag/bugsnag-go/README.md deleted file mode 100644 index b5432293..00000000 --- a/vendor/github.com/bugsnag/bugsnag-go/README.md +++ /dev/null @@ -1,489 +0,0 @@ -Bugsnag Notifier for Golang -=========================== - -The Bugsnag Notifier for Golang gives you instant notification of panics, or -unexpected errors, in your golang app. Any unhandled panics will trigger a -notification to be sent to your Bugsnag project. - -[Bugsnag](http://bugsnag.com) captures errors in real-time from your web, -mobile and desktop applications, helping you to understand and resolve them -as fast as possible. [Create a free account](http://bugsnag.com) to start -capturing exceptions from your applications. - -## How to Install - -1. Download the code - - ```shell - go get github.com/bugsnag/bugsnag-go - ``` - -### Using with net/http apps - -For a golang app based on [net/http](https://godoc.org/net/http), integrating -Bugsnag takes two steps. You should also use these instructions if you're using -the [gorilla toolkit](http://www.gorillatoolkit.org/), or the -[pat](https://github.com/bmizerany/pat/) muxer. - -1. Configure bugsnag at the start of your `main()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func main() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - ReleaseStage: "production", - // more configuration options - }) - - // rest of your program. - } - ``` - -2. Wrap your server in a [bugsnag.Handler](https://godoc.org/github.com/bugsnag/bugsnag-go/#Handler) - - ```go - // a. If you're using the builtin http mux, you can just pass - // bugsnag.Handler(nil) to http.ListenAndServer - http.ListenAndServe(":8080", bugsnag.Handler(nil)) - - // b. If you're creating a server manually yourself, you can set - // its handlers the same way - srv := http.Server{ - Handler: bugsnag.Handler(nil) - } - - // c. If you're not using the builtin http mux, wrap your own handler - // (though make sure that it doesn't already catch panics) - http.ListenAndServe(":8080", bugsnag.Handler(handler)) - ``` - -### Using with Revel apps - -There are two steps to get panic handling in [revel](https://revel.github.io) apps. - -1. Add the `bugsnagrevel.Filter` immediately after the `revel.PanicFilter` in `app/init.go`: - - ```go - - import "github.com/bugsnag/bugsnag-go/revel" - - revel.Filters = []revel.Filter{ - revel.PanicFilter, - bugsnagrevel.Filter, - // ... - } - ``` - -2. Set bugsnag.apikey in the top section of `conf/app.conf`. - - ``` - module.static=github.com/revel/revel/modules/static - - bugsnag.apikey=YOUR_API_KEY_HERE - - [dev] - ``` - -### Using with Google App Engine - -1. Configure bugsnag at the start of your `init()` function: - - ```go - import "github.com/bugsnag/bugsnag-go" - - func init() { - bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", - }) - - // ... - } - ``` - -2. Wrap *every* http.Handler or http.HandlerFunc with Bugsnag: - - ```go - // a. If you're using HandlerFuncs - http.HandleFunc("/", bugsnag.HandlerFunc( - func (w http.ResponseWriter, r *http.Request) { - // ... - })) - - // b. If you're using Handlers - http.Handle("/", bugsnag.Handler(myHttpHandler)) - ``` - -3. 
In order to use Bugsnag, you must provide the current -[`appengine.Context`](https://developers.google.com/appengine/docs/go/reference#Context), or -current `*http.Request` as rawData. The easiest way to do this is to create a new notifier. - - ```go - c := appengine.NewContext(r) - notifier := bugsnag.New(c) - - if err != nil { - notifier.Notify(err) - } - - go func () { - defer notifier.Recover() - - // ... - }() - ``` - - -## Notifying Bugsnag manually - -Bugsnag will automatically handle any panics that crash your program and notify -you of them. If you've integrated with `revel` or `net/http`, then you'll also -be notified of any panics() that happen while processing a request. - -Sometimes however it's useful to manually notify Bugsnag of a problem. To do this, -call [`bugsnag.Notify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Notify) - -```go -if err != nil { - bugsnag.Notify(err) -} -``` - -### Manual panic handling - -To avoid a panic in a goroutine from crashing your entire app, you can use -[`bugsnag.Recover()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to stop a panic from unwinding the stack any further. When `Recover()` is hit, -it will send any current panic to Bugsnag and then stop panicking. This is -most useful at the start of a goroutine: - -```go -go func() { - defer bugsnag.Recover() - - // ... -}() -``` - -Alternatively you can use -[`bugsnag.AutoNotify()`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Recover) -to notify bugsnag of a panic while letting the program continue to panic. This -is useful if you're using a Framework that already has some handling of panics -and you are retrofitting bugsnag support. - -```go -defer bugsnag.AutoNotify() -``` - -## Sending Custom Data - -Most functions in the Bugsnag API, including `bugsnag.Notify()`, -`bugsnag.Recover()`, `bugsnag.AutoNotify()`, and `bugsnag.Handler()` let you -attach data to the notifications that they send. To do this you pass in rawData, -which can be any of the supported types listed here. To add support for more -types of rawData see [OnBeforeNotify](#custom-data-with-onbeforenotify). - -### Custom MetaData - -Custom metaData appears as tabs on Bugsnag.com. You can set it by passing -a [`bugsnag.MetaData`](https://godoc.org/github.com/bugsnag/bugsnag-go/#MetaData) -object as rawData. - -```go -bugsnag.Notify(err, - bugsnag.MetaData{ - "Account": { - "Name": Account.Name, - "Paying": Account.Plan.Premium, - }, - }) -``` - -### Request data - -Bugsnag can extract interesting data from -[`*http.Request`](https://godoc.org/net/http/#Request) objects, and -[`*revel.Controller`](https://godoc.org/github.com/revel/revel/#Controller) -objects. These are automatically passed in when handling panics, and you can -pass them yourself. - -```go -func (w http.ResponseWriter, r *http.Request) { - bugsnag.Notify(err, r) -} -``` - -### User data - -User data is searchable, and the `Id` powers the count of users affected. You -can set which user an error affects by passing a -[`bugsnag.User`](https://godoc.org/github.com/bugsnag/bugsnag-go/#User) object as -rawData. - -```go -bugsnag.Notify(err, - bugsnag.User{Id: "1234", Name: "Conrad", Email: "me@cirw.in"}) -``` - -### Context - -The context shows up prominently in the list view so that you can get an idea -of where a problem occurred. You can set it by passing a -[`bugsnag.Context`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Context) -object as rawData. 
- -```go -bugsnag.Notify(err, bugsnag.Context{"backgroundJob"}) -``` - -### Severity - -Bugsnag supports three severities, `SeverityError`, `SeverityWarning`, and `SeverityInfo`. -You can set the severity of an error by passing one of these objects as rawData. - -```go -bugsnag.Notify(err, bugsnag.SeverityInfo) -``` - -## Configuration - -You must call `bugsnag.Configure()` at the start of your program to use Bugsnag, you pass it -a [`bugsnag.Configuration`](https://godoc.org/github.com/bugsnag/bugsnag-go/#Configuration) object -containing any of the following values. - -### APIKey - -The Bugsnag API key can be found on your [Bugsnag dashboard](https://bugsnag.com) under "Settings". - -```go -bugsnag.Configure(bugsnag.Configuration{ - APIKey: "YOUR_API_KEY_HERE", -}) -``` - -### Endpoint - -The Bugsnag endpoint defaults to `https://notify.bugsnag.com/`. If you're using Bugsnag enterprise, -you should set this to the endpoint of your local instance. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Endpoint: "http://bugsnag.internal:49000/", -}) -``` - -### ReleaseStage - -The ReleaseStage tracks where your app is deployed. You should set this to `production`, `staging`, -`development` or similar as appropriate. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ReleaseStage: "development", -}) -``` - -### NotifyReleaseStages - -The list of ReleaseStages to notify in. By default Bugsnag will notify you in all release stages, but -you can use this to silence development errors. - -```go -bugsnag.Configure(bugsnag.Configuration{ - NotifyReleaseStages: []string{"production", "staging"}, -}) -``` - -### AppVersion - -If you use a versioning scheme for deploys of your app, Bugsnag can use the `AppVersion` to only -re-open errors if they occur in later version of the app. - -```go -bugsnag.Configure(bugsnag.Configuration{ - AppVersion: "1.2.3", -}) -``` - -### Hostname - -The hostname is used to track where exceptions are coming from in the Bugsnag dashboard. The -default value is obtained from `os.Hostname()` so you won't often need to change this. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Hostname: "go1", -}) -``` - -### ProjectPackages - -In order to determine where a crash happens Bugsnag needs to know which packages you consider to -be part of your app (as opposed to a library). By default this is set to `[]string{"main*"}`. Strings -are matched to package names using [`filepath.Match`](http://godoc.org/path/filepath#Match). - -```go -bugsnag.Configure(bugsnag.Configuration{ - ProjectPackages: []string{"main", "github.com/domain/myapp/*"}, -} -``` - -### ParamsFilters - -Sometimes sensitive data is accidentally included in Bugsnag MetaData. You can remove it by -setting `ParamsFilters`. Any key in the `MetaData` that includes any string in the filters -will be redacted. The default is `[]string{"password", "secret"}`, which prevents fields like -`password`, `password_confirmation` and `secret_answer` from being sent. - -```go -bugsnag.Configure(bugsnag.Configuration{ - ParamsFilters: []string{"password", "secret"}, -} -``` - -### Logger - -The Logger to write to in case of an error inside Bugsnag. This defaults to the global logger. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Logger: app.Logger, -} -``` - -### PanicHandler - -The first time Bugsnag is configured, it wraps the running program in a panic -handler using [panicwrap](http://godoc.org/github.com/ConradIrwin/panicwrap). This -forks a sub-process which monitors unhandled panics. 
To prevent this, set -`PanicHandler` to `func() {}` the first time you call -`bugsnag.Configure`. This will prevent bugsnag from being able to notify you about -unhandled panics. - -```go -bugsnag.Configure(bugsnag.Configuration{ - PanicHandler: func() {}, -}) -``` - -### Synchronous - -Bugsnag usually starts a new goroutine before sending notifications. This means -that notifications can be lost if you do a bugsnag.Notify and then immediately -os.Exit. To avoid this problem, set Bugsnag to Synchronous (or just `panic()` -instead ;). - -```go -bugsnag.Configure(bugsnag.Configuration{ - Synchronous: true -}) -``` - -Or just for one error: - -```go -bugsnag.Notify(err, bugsnag.Configuration{Synchronous: true}) -``` - -### Transport - -The transport configures how Bugsnag makes http requests. By default we use -[`http.DefaultTransport`](http://godoc.org/net/http#RoundTripper) which handles -HTTP proxies automatically using the `$HTTP_PROXY` environment variable. - -```go -bugsnag.Configure(bugsnag.Configuration{ - Transport: http.DefaultTransport, -}) -``` - -## Custom data with OnBeforeNotify - -While it's nice that you can pass `MetaData` directly into `bugsnag.Notify`, -`bugsnag.AutoNotify`, and `bugsnag.Recover`, this can be a bit cumbersome and -inefficient — you're constructing the meta-data whether or not it will actually -be used. A better idea is to pass raw data in to these functions, and add an -`OnBeforeNotify` filter that converts them into `MetaData`. - -For example, lets say our system processes jobs: - -```go -type Job struct{ - Retry bool - UserId string - UserEmail string - Name string - Params map[string]string -} -``` - -You can pass a job directly into Bugsnag.notify: - -```go -bugsnag.Notify(err, job) -``` - -And then add a filter to extract information from that job and attach it to the -Bugsnag event: - -```go -bugsnag.OnBeforeNotify( - func(event *bugsnag.Event, config *bugsnag.Configuration) error { - - // Search all the RawData for any *Job pointers that we're passed in - // to bugsnag.Notify() and friends. - for _, datum := range event.RawData { - if job, ok := datum.(*Job); ok { - // don't notify bugsnag about errors in retries - if job.Retry { - return fmt.Errorf("not notifying about retried jobs") - } - - // add the job as a tab on Bugsnag.com - event.MetaData.AddStruct("Job", job) - - // set the user correctly - event.User = &User{Id: job.UserId, Email: job.UserEmail} - } - } - - // continue notifying as normal - return nil - }) -``` - -## Advanced Usage - -If you want to have multiple different configurations around in one program, -you can use `bugsnag.New()` to create multiple independent instances of -Bugsnag. You can use these without calling `bugsnag.Configure()`, but bear in -mind that until you call `bugsnag.Configure()` unhandled panics will not be -sent to bugsnag. - -```go -notifier := bugsnag.New(bugsnag.Configuration{ - APIKey: "YOUR_OTHER_API_KEY", -}) -``` - -In fact any place that lets you pass in `rawData` also allows you to pass in -configuration. 
For example, to send HTTP errors to a separate Bugsnag project, you -could do: - -```go -bugsnag.Handler(nil, bugsnag.Configuration{APIKey: "YOUR_OTHER_API_KEY"}) -``` - -### GroupingHash - -If you need to override Bugsnag's grouping algorithm, you can set the -`GroupingHash` in an `OnBeforeNotify`: - -```go -bugsnag.OnBeforeNotify( - func(event *bugsnag.Event, config *bugsnag.Configuration) error { - event.GroupingHash = calculateGroupingHash(event) - return nil - }) -``` diff --git a/vendor/github.com/bugsnag/bugsnag-go/errors/README.md b/vendor/github.com/bugsnag/bugsnag-go/errors/README.md deleted file mode 100644 index 8d8e097a..00000000 --- a/vendor/github.com/bugsnag/bugsnag-go/errors/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Adds stack traces to errors in Go. - -This was made to help build the Bugsnag notifier but can be used standalone if -you would like to have stack traces on errors. - -See [Godoc](https://godoc.org/github.com/bugsnag/bugsnag-go/errors) for the API docs. diff --git a/vendor/github.com/bugsnag/panicwrap/README.md b/vendor/github.com/bugsnag/panicwrap/README.md deleted file mode 100644 index d0a59675..00000000 --- a/vendor/github.com/bugsnag/panicwrap/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# panicwrap - -panicwrap is a Go library that re-executes a Go binary and monitors stderr -output from the binary for a panic. When it finds a panic, it executes a -user-defined handler function. Stdout, stderr, stdin, signals, and exit -codes continue to work as normal, making the existence of panicwrap mostly -invisible to the end user until a panic actually occurs. - -Since a panic is truly a bug in the program and is meant to crash the runtime, -globally catching panics within Go applications is not supposed to be possible. -Despite this, it is often useful to have a way to know when panics occur. -panicwrap allows you to do something with these panics, such as writing them -to a file, so that you can track when panics occur. - -panicwrap is ***not a panic recovery system***. Panics indicate serious -problems with your application and _should_ crash the runtime. panicwrap -is just meant as a way to monitor for panics. If you still think this is -the worst idea ever, read the section below on why. - -## Features - -* **SIMPLE!** -* Works with all Go applications on all platforms Go supports -* Custom behavior when a panic occurs -* Stdout, stderr, stdin, exit codes, and signals continue to work as - expected. - -## Usage - -Using panicwrap is simple. It behaves a lot like `fork`, if you know -how that works. A basic example is shown below. - -Because it would be sad to panic while capturing a panic, it is recommended -that the handler functions for panicwrap remain relatively simple and well -tested. panicwrap itself contains many tests. - -```go -package main - -import ( - "fmt" - "github.com/mitchellh/panicwrap" - "os" -) - -func main() { - exitStatus, err := panicwrap.BasicWrap(panicHandler) - if err != nil { - // Something went wrong setting up the panic wrapper. Unlikely, - // but possible. - panic(err) - } - - // If exitStatus >= 0, then we're the parent process and the panicwrap - // re-executed ourselves and completed. Just exit with the proper status. - if exitStatus >= 0 { - os.Exit(exitStatus) - } - - // Otherwise, exitStatus < 0 means we're the child. Continue executing as - // normal... - - // Let's say we panic - panic("oh shucks") -} - -func panicHandler(output string) { - // output contains the full output (including stack traces) of the - // panic. 
Put it in a file or something. - fmt.Printf("The child panicked:\n\n%s\n", output) - os.Exit(1) -} -``` - -## How Does it Work? - -panicwrap works by re-executing the running program (retaining arguments, -environmental variables, etc.) and monitoring the stderr of the program. -Since Go always outputs panics in a predictable way with a predictable -exit code, panicwrap is able to reliably detect panics and allow the parent -process to handle them. - -## WHY?! Panics should CRASH! - -Yes, panics _should_ crash. They are 100% always indicative of bugs. -However, in some cases, such as user-facing programs (programs like -[Packer](http://github.com/mitchellh/packer) or -[Docker](http://github.com/dotcloud/docker)), it is up to the user to -report such panics. This is unreliable, at best, and it would be better if the -program could have a way to automatically report panics. panicwrap provides -a way to do this. - -For backend applications, it is easier to detect crashes (since the application -exits). However, it is still nice sometimes to more intelligently log -panics in some way. For example, at [HashiCorp](http://www.hashicorp.com), -we use panicwrap to log panics to timestamped files with some additional -data (configuration settings at the time, environmental variables, etc.) - -The goal of panicwrap is _not_ to hide panics. It is instead to provide -a clean mechanism for handling them before bubbling the up to the user -and ultimately crashing. diff --git a/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/denverdino/aliyungo/LICENSE.txt deleted file mode 100644 index 91829713..00000000 --- a/vendor/github.com/denverdino/aliyungo/LICENSE.txt +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2015 Li Yi (denverdino@gmail.com). - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8a..00000000 --- a/vendor/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175f..00000000 --- a/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db381..00000000 --- a/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. 
- - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. - diff --git a/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/github.com/garyburd/redigo/internal/commandinfo.go index ce78eff6..7ad1ade5 100644 --- a/vendor/github.com/garyburd/redigo/internal/commandinfo.go +++ b/vendor/github.com/garyburd/redigo/internal/commandinfo.go @@ -12,7 +12,7 @@ // License for the specific language governing permissions and limitations // under the License. -package internal +package internal // import "github.com/garyburd/redigo/internal" import ( "strings" diff --git a/vendor/github.com/garyburd/redigo/redis/doc.go b/vendor/github.com/garyburd/redigo/redis/doc.go index 1ae6f0cc..a5cd454a 100644 --- a/vendor/github.com/garyburd/redigo/redis/doc.go +++ b/vendor/github.com/garyburd/redigo/redis/doc.go @@ -166,4 +166,4 @@ // if _, err := redis.Scan(reply, &value1, &value2); err != nil { // // handle error // } -package redis +package redis // import "github.com/garyburd/redigo/redis" diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE rename to vendor/github.com/go-ini/ini/LICENSE diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 00000000..80afe743 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 00000000..cd065e78 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,501 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.21.1" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return &bytesReadCloser{bytes.NewReader(s.data)}, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. 
+ Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. 
+func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + continue + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + val := key.value + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 00000000..9738c55a --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,633 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + Comment string +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. 
+func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. 
+func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. 
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000..dc6df87a --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,325 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines when desired. + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + key, err := section.NewKey(string(line), "true") + if err != nil { + return err + } + key.isBooleanType = true + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. 
+ isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 00000000..bbb73caf --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,206 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{ + s: s, + name: name, + value: val, + } + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. 
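GetKey above falls back to parent sections when the section name is dotted, which is easy to miss from the code alone. A short, hedged sketch, again assuming the exported `ini.Load` entry point and the accessors defined in this file (section and key names are made up):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

const data = `
[database]
host = localhost

[database.replica]
port = 5433
`

func main() {
	cfg, err := ini.Load([]byte(data))
	if err != nil {
		panic(err)
	}
	replica := cfg.Section("database.replica")

	// Defined directly on the child section.
	fmt.Println(replica.Key("port").String()) // "5433"

	// Not defined here, so GetKey walks the dotted name up to [database].
	host, err := replica.GetKey("host")
	if err != nil {
		panic(err)
	}
	fmt.Println(host.String())             // "localhost"
	fmt.Println(replica.HasKey("missing")) // false
}
```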
+func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go similarity index 62% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go rename to vendor/github.com/go-ini/ini/struct.go index c1184371..d00fb4b8 100644 --- a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "reflect" + "strings" "time" "unicode" ) @@ -76,6 +77,59 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() +// setSliceWithProperType sets proper values to slice based on its type. 
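The helper defined just below converts delimited key values into typed slices when mapping an INI source onto a struct. A hedged usage sketch via the exported `ini.MapTo`, with purely illustrative field names and a `delim` tag (the default delimiter is ","):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

// Field and key names here are purely illustrative.
type Server struct {
	Name  string   `ini:"name"`
	Ports []int    `ini:"ports" delim:","`
	Peers []string `ini:"peers"` // delimiter defaults to ","
}

const data = `
name  = edge-1
ports = 80,443,8080
peers = alpha, beta
`

func main() {
	var s Server
	if err := ini.MapTo(&s, []byte(data)); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Name:edge-1 Ports:[80 443 8080] Peers:[alpha beta]}
}
```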
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error { + strs := key.Strings(delim) + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals = key.Ints(delim) + case reflect.Int64: + vals = key.Int64s(delim) + case reflect.Uint: + vals = key.Uints(delim) + case reflect.Uint64: + vals = key.Uint64s(delim) + case reflect.Float64: + vals = key.Float64s(delim) + case reflectTime: + vals = key.Times(delim) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + // setWithProperType sets proper value to field based on its type, // but it does not return error for failing parsing, // because we want to use default value that is already assigned to strcut. @@ -94,20 +148,22 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri field.SetBool(boolVal) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } intVal, err := key.Int64() - if err != nil { + if err != nil || intVal == 0 { return nil } field.SetInt(intVal) // byte is an alias for uint8, so supporting uint8 breaks support for byte case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: durationVal, err := key.Duration() - if err == nil { + // Skip zero value + if err == nil && int(durationVal) > 0 { field.Set(reflect.ValueOf(durationVal)) return nil } @@ -131,29 +187,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri } field.Set(reflect.ValueOf(timeVal)) case reflect.Slice: - vals := key.Strings(delim) - numVals := len(vals) - if numVals == 0 { - return nil - } - - sliceOf := field.Type().Elem().Kind() - - var times []time.Time - if sliceOf == reflectTime { - times = key.Times(delim) - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(times[i])) - default: - slice.Index(i).Set(reflect.ValueOf(vals[i])) - } - } - field.Set(slice) + return setSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } @@ -175,7 +209,8 @@ func (s *Section) mapTo(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty + fieldName := s.parseFieldName(tpField.Name, opts[0]) if len(fieldName) == 0 || !field.CanSet() { continue } @@ -238,40 +273,81 @@ func MapTo(v, source interface{}, others ...interface{}) error { return MapToWithMapper(v, nil, 
source, others...) } -// reflectWithProperType does the opposite thing with setWithProperType. +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) + return reflectSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
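The `omitempty` handling introduced in this hunk only matters when reflecting a struct back into an INI file. A minimal sketch, assuming the exported `ini.Empty`, `ini.ReflectFrom`, and `File.WriteTo` work as in upstream go-ini (the struct and tag names are invented for illustration):

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

type Author struct {
	Name   string `ini:"name"`
	Email  string `ini:"email,omitempty"`
	Rating int    `ini:"rating,omitempty"`
}

func main() {
	cfg := ini.Empty()
	a := &Author{Name: "Unknwon"} // Email and Rating stay at their zero values

	if err := ini.ReflectFrom(cfg, a); err != nil {
		panic(err)
	}
	// Only "name" is emitted; isEmptyValue drops the two omitempty fields.
	cfg.WriteTo(os.Stdout)
}
```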
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func (s *Section) reflectFrom(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -287,13 +363,18 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) if len(fieldName) == 0 || !field.CanSet() { continue } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) if err != nil { @@ -301,7 +382,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { sec, _ = s.f.NewSection(fieldName) } if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } continue } @@ -312,7 +393,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { key, _ = s.NewKey(fieldName, "") } if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } } diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a..00000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b0..00000000 --- a/vendor/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/gorilla/handlers/.travis.yml deleted file mode 100644 index 354b7f8b..00000000 --- a/vendor/github.com/gorilla/handlers/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md deleted file mode 100644 index a340abe0..00000000 --- a/vendor/github.com/gorilla/handlers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -gorilla/handlers -================ -[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) - -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's `net/http` package (or any framework supporting `http.Handler`), including: - -* `LoggingHandler` for logging HTTP requests in the Apache [Common Log - Format](http://httpd.apache.org/docs/2.2/logs.html#common). -* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log - Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by - both Apache and nginx. -* `CompressHandler` for gzipping responses. -* `ContentTypeHandler` for validating requests against a list of accepted - content types. -* `MethodHandler` for matching HTTP methods against handlers in a - `map[string]http.Handler` -* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the - `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` - headers when running a Go server behind a HTTP reverse proxy. -* `CanonicalHost` for re-directing to the preferred host when handling multiple - domains (i.e. multiple CNAME aliases). 
- -Other handlers are documented [on the Gorilla -website](http://www.gorillatoolkit.org/pkg/handlers). - -## Example - -A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: - -```go -import ( - "net/http" - "github.com/gorilla/handlers" -) - -func main() { - r := http.NewServeMux() - - // Only log requests to our admin dashboard to stdout - r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) - r.HandleFunc("/", ShowIndex) - - // Wrap our server with our gzip handler to gzip compress all responses. - http.ListenAndServe(":8000", handlers.CompressHandler(r)) -} -``` - -## License - -BSD licensed. See the included LICENSE file for details. - diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d4657..00000000 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b0..00000000 --- a/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d17..00000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE rename to vendor/github.com/jmespath/go-jmespath/LICENSE diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go rename to vendor/github.com/jmespath/go-jmespath/api.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go rename to vendor/github.com/jmespath/go-jmespath/astnodetype_string.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go rename to vendor/github.com/jmespath/go-jmespath/functions.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go rename to vendor/github.com/jmespath/go-jmespath/interpreter.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go rename to vendor/github.com/jmespath/go-jmespath/lexer.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go rename to vendor/github.com/jmespath/go-jmespath/parser.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/toktype_string.go rename to vendor/github.com/jmespath/go-jmespath/toktype_string.go diff --git a/vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go rename to vendor/github.com/jmespath/go-jmespath/util.go diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 00000000..5763fa7f --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same 
license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 00000000..0db7f7bf --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,455 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "time" +) + +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + rtt time.Duration + t time.Time + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. +type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be fully qualified + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. 
It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + var co *Conn + co, err = DialTimeout("udp", a, dnsTimeout) + if err != nil { + return nil, err + } + + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(time.Now().Add(dnsTimeout)) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(time.Now().Add(dnsTimeout)) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. +// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messsages without an OPT RR will fallback to the historic limit +// of 512 bytes. +func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, a) + } + // This adds a bunch of garbage, TODO(miek). 
+ t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(m, a) + }) + if err != nil { + return r, rtt, err + } + if shared { + return r.Copy(), rtt, nil + } + return r, rtt, nil +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + network := "udp" + tls := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + tls = true + case "tcp4-tls": + network = "tcp4" + tls = true + case "tcp6-tls": + network = "tcp6" + tls = true + default: + if c.Net != "" { + network = c.Net + } + } + + var deadline time.Time + if c.Timeout != 0 { + deadline = time.Now().Add(c.Timeout) + } + + if tls { + co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout()) + } else { + co, err = DialTimeout(network, a, c.dialTimeout()) + } + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout())) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout())) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, co.rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction +// signature is verified. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If ErrTruncated was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use a truncated message + if err == ErrTruncated { + return m, err + } + return nil, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. 
+ l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + co.rtt = time.Since(co.t) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + co.rtt = time.Since(co.t) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + co.t = time.Now() + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) + n, err := io.Copy(w, bytes.NewReader(p)) + return int(n), err + } + n, err = co.Conn.(*net.UDPConn).Write(p) + return n, err +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeout acts like Dial but takes a timeout. 
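The Conn methods above (WriteMsg, ReadMsg, the length-prefixed TCP framing) are normally driven through the Dial helpers defined in this file. A hedged round-trip sketch; the resolver address and query name are illustrative, not anything the package prescribes:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Raw Conn usage mirroring WriteMsg/ReadMsg above; 8.8.8.8:53 is just an
	// example resolver address.
	co, err := dns.Dial("udp", "8.8.8.8:53")
	if err != nil {
		panic(err)
	}
	defer co.Close()

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	if err := co.WriteMsg(m); err != nil {
		panic(err)
	}
	r, err := co.ReadMsg()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Answer)
}
```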
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.DialTimeout(network, address, timeout) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = tls.Dial(network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + var dialer net.Dialer + dialer.Timeout = timeout + + conn = new(Conn) + conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { + if deadline.IsZero() { + return time.Now().Add(timeout) + } + return deadline +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 00000000..cfa9ad0b --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,99 @@ +package dns + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + c := new(ClientConfig) + scanner := bufio.NewScanner(file) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. 
+ name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 1 { + n = 1 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 8 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 00000000..cdaa833f --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,44 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" + "io" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + io.WriteString(h, string(cert.Raw)) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + io.WriteString(h, string(cert.RawSubjectPublicKeyInfo)) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 00000000..cf456165 --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,282 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.Response = true + dns.Opcode = OpcodeQuery + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. 
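ClientConfigFromFile above is typically combined with the Msg setters in this file and a Client to query whatever resolver the host already uses. A hedged sketch (the resolv.conf path and query name are illustrative):

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	// Parse the local resolver configuration, then ask the first nameserver.
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		panic(err)
	}

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeMX)

	c := new(dns.Client)
	r, rtt, err := c.Exchange(m, net.JoinHostPort(conf.Servers[0], conf.Port))
	if err != nil {
		panic(err)
	}
	fmt.Println(rtt, r.Rcode, len(r.Answer))
}
```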
+func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = 300 + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // EDNS0 is at the end of the additional section, start there. + // We might want to change this to *only* look at the last two + // records. So we see TSIG and/or OPT - this a slightly bigger + // change though. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. 
Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters, but there is no length check for the entire +// string s. I.e. a domain name longer than 255 characters is considered valid. +func IsDomainName(s string) (labels int, ok bool) { + _, labels, err := packDomainName(s, nil, 0, nil, false) + return labels, err == nil +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. +func IsMsg(buf []byte) error { + // Header + if len(buf) < 12 { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + l := len(s) + if l == 0 { + return false + } + return s[l-1] == '.' +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if ip.To4() != nil { + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + } + // Must be IPv6 + buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. 
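The small name helpers in this file (Fqdn, IsFqdn, IsSubDomain, ReverseAddr) are easiest to understand from their outputs. A brief sketch with illustrative names and addresses:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.Fqdn("www.example.com"))                       // "www.example.com."
	fmt.Println(dns.IsFqdn("www.example.com."))                    // true
	fmt.Println(dns.IsSubDomain("example.com.", "a.example.com.")) // true

	// ReverseAddr builds the PTR owner name for an address.
	ptr, err := dns.ReverseAddr("192.0.2.10")
	fmt.Println(ptr, err) // "10.2.0.192.in-addr.arpa." <nil>
}
```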
+func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. +func (c Class) String() string { + if c1, ok := ClassToString[uint16(c)]; ok { + return c1 + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 00000000..b3292287 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,104 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. + MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. + MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. +) + +// Error represents a DNS error. +type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: " + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + // len returns the length (in octets) of the uncompressed RR in wire format. + len() int + // pack packs an RR into wire format. + pack([]byte, int, map[string]int, bool) (int, error) +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. +func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) copyHeader() *RR_Header { + r := new(RR_Header) + r.Name = h.Name + r.Rrtype = h.Rrtype + r.Class = h.Class + r.Ttl = h.Ttl + r.Rdlength = h.Rdlength + return r +} + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len() int { + l := len(h.Name) + 1 + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
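Every record type in the package satisfies the RR interface declared above and embeds an RR_Header. A hedged sketch of inspecting one; it assumes the package's NewRR parser (defined in another file of this vendored copy) for building the record from presentation format:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// NewRR is not part of this hunk; it parses presentation format and
	// returns a value satisfying the RR interface declared above.
	rr, err := dns.NewRR("www.example.com. 3600 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}

	hdr := rr.Header()
	fmt.Println(hdr.Name, dns.Type(hdr.Rrtype), dns.Class(hdr.Class), hdr.Ttl)
	fmt.Println(rr.String()) // full presentation-format record
}
```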
+func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 00000000..f5f3fbdd --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,721 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// Map for algorithm names. +var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// Map of algorithm strings. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// Map of algorithm crypto hashes. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// Map for hash names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// Map of hash strings. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. +func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. 
We could do this faster by looking directly + // at the base64 values. But I'm lazy. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += (keytag >> 16) & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. +func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + // digest buffer + digest := append(owner, wire...) // another copy + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(digest) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = *k.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = *d.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. 
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signdata = append(signdata, wire...) + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + h := hash.New() + h.Write(signdata) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. 
Also check that type and + // class matches the RRSIG record. + if rrset[0].Header().Class != rr.Hdr.Class { + return ErrRRset + } + if rrset[0].Header().Rrtype != rr.TypeCovered { + return ErrRRset + } + + // RFC 4035 5.3.2. Reconstructing the Signed Data + // Copy the sig, except the rrsig data + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + sigwire.SignerName = strings.ToLower(rr.SignerName) + // Create the desired binary blob + signeddata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signeddata) + if err != nil { + return err + } + signeddata = signeddata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + signeddata = append(signeddata, wire...) + + sigbuf := rr.sigBuf() // Get the binary signature data + if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // TODO(miek) + // remove the domain name and assume its ours? + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: + // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? + pubkey := k.publicKeyRSA() // Get the key + if pubkey == nil { + return ErrKey + } + + h := hash.New() + h.Write(signeddata) + return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) + + case ECDSAP256SHA256, ECDSAP384SHA384: + pubkey := k.publicKeyECDSA() + if pubkey == nil { + return ErrKey + } + + // Split sigbuf into the r and s coordinates + r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) + s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) + + h := hash.New() + h.Write(signeddata) + if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { + return nil + } + return ErrSig + + default: + return ErrAlg + } +} + +// ValidityPeriod uses RFC1982 serial arithmetic to calculate +// if a signature period is valid. If t is the zero time, the +// current time is taken other t is. Returns true if the signature +// is valid at the given time, otherwise returns false. +func (rr *RRSIG) ValidityPeriod(t time.Time) bool { + var utc int64 + if t.IsZero() { + utc = time.Now().UTC().Unix() + } else { + utc = t.UTC().Unix() + } + modi := (int64(rr.Inception) - utc) / year68 + mode := (int64(rr.Expiration) - utc) / year68 + ti := int64(rr.Inception) + (modi * year68) + te := int64(rr.Expiration) + (mode * year68) + return ti <= utc && utc <= te +} + +// Return the signatures base64 encodedig sigdata as a byte slice. +func (rr *RRSIG) sigBuf() []byte { + sigbuf, err := fromBase64([]byte(rr.Signature)) + if err != nil { + return nil + } + return sigbuf +} + +// publicKeyRSA returns the RSA public key from a DNSKEY record. +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + + // RFC 2537/3110, section 2. 
RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + pubkey := new(rsa.PublicKey) + + pubkey.N = big.NewInt(0) + shift := uint64((explen - 1) * 8) + expo := uint64(0) + for i := int(explen - 1); i > 0; i-- { + expo += uint64(keybuf[keyoff+i]) << shift + shift -= 8 + } + // Remainder + expo += uint64(keybuf[keyoff]) + if expo > 2<<31 { + // Larger expo than supported. + // println("dns: F5 primes (or larger) are not supported") + return nil + } + pubkey.E = int(expo) + + pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. +func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = big.NewInt(0) + pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = big.NewInt(0) + pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) + return pubkey +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. +func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + r1.Header().Ttl = s.OrigTtl + labels := SplitDomainName(r1.Header().Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. 
+ switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) + } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 00000000..229a0793 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,156 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)) + if len(i.Bytes()) < 256 { + buf = make([]byte, 1) + buf[0] = uint8(len(i.Bytes())) + } else { + buf = make([]byte, 3) + buf[0] = 0 + buf[1] = uint8(len(i.Bytes()) >> 8) + buf[2] = uint8(len(i.Bytes())) + } + buf = append(buf, i.Bytes()...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) 
+ return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 00000000..9ff3a617 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,249 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0]) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. 
+func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 00000000..56f3ea93 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,85 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p1 := big.NewInt(0).Sub(p.Primes[0], one) + q1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p1) + exp2 := big.NewInt(0).Mod(p.D, q1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := 
toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 00000000..e38753d7 --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,251 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Server- and client-side programming is supported. +The package allows complete control over what is send out to the DNS. The package +API follows the less-is-more principle, by presenting a small, clean interface. + +The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. +Note that domain names MUST be fully qualified, before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. +Basic usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, + Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource +records (sets). Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask +the MX records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be send. +Basic use pattern for synchronous querying the DNS at a +server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +If these "advanced" features are not needed, a simple UDP query can be send, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get dns message. A dns message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation +form both when unpacked and when converted to strings. 
+ +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks +will be escaped. Bytes below 32 and above 127 will be converted to \DDD +form. + +For domain names, in addition to the above rules brackets, periods, +spaces, semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It +uses public key cryptography to sign resource records. The +public keys are stored in DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit +to a request. + + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of +the sections. Question is Zone, Answer is Prerequisite, Authority is +Update, only the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. + + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. +If you have decided on the prerequisites you can tell what RRs should +be added or deleted. The next table shows the options you have and +what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with +TSIG, this is the basic use pattern. In this example we request an AXFR for +miek.nl. with TSIG key named "axfr." 
and secret "so6ZGir4GPAqINNh9U5c3A==" +and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. +If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. + + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range +is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated +by RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. +Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) +interfaces. Currently only a few have been standardized: EDNS0_NSID +(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note +that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +secret approach in TSIG. +Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 00000000..fc0b4692 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,597 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "strconv" +) + +// EDNS0 Option codes. 
+const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (RFC5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (RFC6891) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828) + EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) + _DO = 1 << 15 // dnssec ok +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. +type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + if o.(*EDNS0_SUBNET).DraftOption { + s += " (draft)" + } + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! + return + } + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. 
Any arguments after the 1st is silently ignored. +func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// The nsid EDNS0 option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.NetMask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic +// for which netmask applies to the address. This code will parse all the +// available bits when unpacking (up to optlen). When packing it will apply +// SourceNetmask. If you need more advanced logic, patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP + DraftOption bool // Set to true if using the old (0x50fa) option code +} + +func (e *EDNS0_SUBNET) Option() uint16 { + if e.DraftOption { + return EDNS0SUBNETDRAFT + } + return EDNS0SUBNET +} + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
+ case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The Cookie EDNS0 option +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." 
+// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) 
+ } + } + return s +} + +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. 
+} + +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { + return EDNS0TCPKEEPALIVE +} + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 00000000..3f5303c2 --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. 
+func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 00000000..e4481a4b --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,159 @@ +package dns + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Parse the $GENERATE statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. +// Any error are returned as a string value, the empty string signals +// "no error". +func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. 
+ s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 00000000..fca5c7dd --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,168 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. + +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if s[len(s)-1] == '.' 
{ + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + end := 0 + for i := 1; i < len(idx); i++ { + end = idx[i] + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + labels = append(labels, s[begin:fqdnEnd]) + return labels +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are not downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + s1 = Fqdn(s1) + s2 = Fqdn(s2) + l1 := Split(s1) + l2 := Split(s2) + + // the first check: root label + if l1 == nil || l2 == nil { + return + } + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if s1[l1[j1]:] == s2[l2[j2]:] { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. +// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. +// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + quote := false + for i = offset; i < len(s)-1; i++ { + switch s[i] { + case '\\': + quote = !quote + default: + quote = false + case '.': + if quote { + quote = !quote + continue + } + return i + 1, false + } + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 00000000..a9acd1e9 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1231 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). 
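Reviewer's note: a quick sketch exercising the label helpers from labels.go above; the expected results follow directly from the doc comments. Editorial illustration, not part of the vendored file.

    package main

    import (
        "fmt"

        "github.com/miekg/dns"
    )

    func main() {
        fmt.Println(dns.SplitDomainName("www.miek.nl."))              // [www miek nl]
        fmt.Println(dns.CompareDomainName("www.miek.nl.", "bla.nl.")) // 1 (only "nl" is shared)
        fmt.Println(dns.CountLabel("www.miek.nl."))                   // 3
        fmt.Println(dns.Split("www.miek.nl."))                        // [0 4 9]

        off, end := dns.NextLabel("www.miek.nl.", 0)
        fmt.Println(off, end) // 4 false
    }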
If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "math/big" + "math/rand" + "strconv" +) + +func init() { + // Initialize default math/rand source using crypto/rand to provide better + // security without the performance trade-off. + buf := make([]byte, 8) + _, err := crand.Read(buf) + if err != nil { + // Failed to read from cryptographic source, fallback to default initial + // seed (1) by returning early + return + } + seed := binary.BigEndian.Uint64(buf) + rand.Seed(int64(seed)) +} + +const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used it too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being uses before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. + ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. +) + +// Id by default, returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. This being a +// variable the function can be reassigned to a custom function. +// For instance, to make it return a static value: +// +// dns.Id = func() uint16 { return 3 } +var Id func() uint16 = id + +// id returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. +func id() uint16 { + id32 := rand.Uint32() + return uint16(id32) +} + +// MsgHdr is a a manually-unpacked version of (id, bits). 
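Reviewer's note: the Id variable documented above is deliberately reassignable. The sketch below swaps in a generator that draws every message ID straight from crypto/rand instead of the seeded math/rand source. Editorial illustration, not part of the vendored file; SetQuestion and TypeA come from the package's helper API, which is not shown in this hunk.

    package main

    import (
        crand "crypto/rand"
        "encoding/binary"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        // Replace the default message-ID generator.
        dns.Id = func() uint16 {
            var b [2]byte
            if _, err := crand.Read(b[:]); err != nil {
                log.Fatal(err)
            }
            return binary.BigEndian.Uint16(b[:])
        }

        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeA) // SetQuestion fills m.Id via Id()
        log.Printf("query id: %d", m.Id)
    }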
+type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. +type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMPL", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. 
+ begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } else if bs[i] == 't' { + bs[i] = '\t' + } else if bs[i] == 'r' { + bs[i] = '\r' + } else if bs[i] == 'n' { + bs[i] = '\n' + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + if compress && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' { + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. 
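Reviewer's note: a small round-trip sketch for PackDomainName and UnpackDomainName as defined around this point. It shows the shared suffix of the second name collapsing to a two-byte compression pointer. Editorial illustration, not part of the vendored file.

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        buf := make([]byte, 64)
        compression := map[string]int{}

        off, err := dns.PackDomainName("www.example.org.", buf, 0, compression, true)
        if err != nil {
            log.Fatal(err)
        }
        end, err := dns.PackDomainName("ftp.example.org.", buf, off, compression, true)
        if err != nil {
            log.Fatal(err)
        }
        // The first name needs 17 bytes; the second only its "ftp" label plus a pointer.
        fmt.Println(off, end-off) // 17 6

        name, _, err := dns.UnpackDomainName(buf, 0)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(name) // www.example.org.
    }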
+func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, '\\', 't') + case '\r': + s = append(s, '\\', 'r') + default: + if b < 32 || b >= 127 { // unprintable use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else if bs[i] == 't' { + msg[offset] = '\t' + } else if bs[i] == 'r' { + msg[offset] = '\r' + } else if bs[i] == 'n' { + msg[offset] = '\n' + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return 
offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t': + s = append(s, `\t`...) + case '\r': + s = append(s, `\r`...) + case '\n': + s = append(s, `\n`...) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if rr == nil { + return len(msg), &Error{err: "nil rr"} + } + + off1, err = rr.pack(msg, off, compression, compress) + if err != nil { + return len(msg), err + } + // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. + if rawSetRdlength(msg, off, off1) { + return off1, nil + } + return off, ErrRdata +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + end := off + int(h.Rdlength) + + if fn, known := typeToUnpack[h.Rrtype]; !known { + rr, off, err = unpackRFC3597(h, msg, off) + } else { + rr, off, err = fn(h, msg, off) + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + return rr, off, err +} + +// unpackRRslice unpacks msg[off:] into an []RR. 
+// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Optimistically make dst be the length that was sent + dst := make([]RR, 0, l) + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return " MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small +// a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // We use a similar function in tsig.go's stripTsig. + var ( + dh Header + compression map[string]int + ) + + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + compress := dns.Compress + dns.Compress = false + if packLen := dns.Len() + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + dns.Compress = compress + + // Pack it in: header and then the pieces. 
+ off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + if off == len(msg) { + return ErrTruncated + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + // Optimistically use the count given to us in the header + dns.Question = make([]Question, 0, int(dh.Qdcount)) + + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + // Even if Truncated is set, we only will set ErrTruncated if we + // actually got the questions + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! + dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } else if dns.Truncated { + // Whether we ran into a an error or not, we want to return that it + // was truncated + err = ErrTruncated + } + return err +} + +// Convert a complete message to a string with dig-like output. 
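Reviewer's note: a minimal round trip through Pack and Unpack, matching the packing and unpacking paths implemented above. Editorial illustration, not part of the vendored file; SetQuestion and TypeAAAA are package helpers not shown in this hunk.

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeAAAA)

        wire, err := m.Pack()
        if err != nil {
            log.Fatal(err)
        }

        var r dns.Msg
        if err := r.Unpack(wire); err != nil {
            log.Fatal(err)
        }
        fmt.Println(r.Id == m.Id, r.Question[0].Name, r.Question[0].Qtype == dns.TypeAAAA)
        // true example.org. true
    }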
+func (dns *Msg) String() string { + if dns == nil { + return " MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for i := 0; i < len(dns.Question); i++ { + s += dns.Question[i].String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] != nil { + s += dns.Answer[i].String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] != nil { + s += dns.Ns[i].String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] != nil { + s += dns.Extra[i].String() + "\n" + } + } + } + return s +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { + // We always return one more than needed. + l := 12 // Message header is always 12 bytes + var compression map[string]int + if dns.Compress { + compression = make(map[string]int) + } + for i := 0; i < len(dns.Question); i++ { + l += dns.Question[i].len() + if dns.Compress { + compressionLenHelper(compression, dns.Question[i].Name) + } + } + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] == nil { + continue + } + l += dns.Answer[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Answer[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Answer[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Answer[i]) + } + } + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] == nil { + continue + } + l += dns.Ns[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Ns[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Ns[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Ns[i]) + } + } + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] == nil { + continue + } + l += dns.Extra[i].len() + if dns.Compress { + k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Extra[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Extra[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Extra[i]) + } + } + return l +} + +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { + pref := "" + lbs := Split(s) + for j := len(lbs) - 1; j >= 0; j-- { + pref = s[lbs[j]:] + if _, ok := c[pref]; !ok { + c[pref] = len(pref) + } + } +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. 
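Reviewer's note: the sketch below compares the Len estimate above, with and without compression accounting, against the size of the actually packed message. Editorial illustration, not part of the vendored file.

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeMX)
        rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
        if err != nil {
            log.Fatal(err)
        }
        m.Answer = append(m.Answer, rr)

        m.Compress = false
        uncompressed := m.Len()
        m.Compress = true
        compressed := m.Len()

        wire, err := m.Pack()
        if err != nil {
            log.Fatal(err)
        }
        // Len is documented above as a fast over-estimate; compare it with the
        // real packed size.
        fmt.Println(uncompressed, compressed, len(wire))
    }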
+func compressionLenSearch(c map[string]int, s string) (int, bool) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false + } + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true + } + if end { + break + } + off, end = NextLabel(s, off) + } + return 0, false +} + +// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *NS: + compressionLenHelper(c, x.Ns) + case *MX: + compressionLenHelper(c, x.Mx) + case *CNAME: + compressionLenHelper(c, x.Target) + case *PTR: + compressionLenHelper(c, x.Ptr) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *MB: + compressionLenHelper(c, x.Mb) + case *MG: + compressionLenHelper(c, x.Mg) + case *MR: + compressionLenHelper(c, x.Mr) + case *MF: + compressionLenHelper(c, x.Mf) + case *MD: + compressionLenHelper(c, x.Md) + case *RT: + compressionLenHelper(c, x.Host) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *SRV: + compressionLenHelper(c, x.Target) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + // HIP? + } +} + +// Only search on compressing these types. +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *NS: + return compressionLenSearch(c, x.Ns) + case *MX: + return compressionLenSearch(c, x.Mx) + case *CNAME: + return compressionLenSearch(c, x.Target) + case *DNAME: + return compressionLenSearch(c, x.Target) + case *PTR: + return compressionLenSearch(c, x.Ptr) + case *SOA: + k, ok := compressionLenSearch(c, x.Ns) + k1, ok1 := compressionLenSearch(c, x.Mbox) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *MB: + return compressionLenSearch(c, x.Mb) + case *MG: + return compressionLenSearch(c, x.Mg) + case *MR: + return compressionLenSearch(c, x.Mr) + case *MF: + return compressionLenSearch(c, x.Mf) + case *MD: + return compressionLenSearch(c, x.Md) + case *RT: + return compressionLenSearch(c, x.Host) + case *MINFO: + k, ok := compressionLenSearch(c, x.Rmail) + k1, ok1 := compressionLenSearch(c, x.Email) + if !ok && !ok1 { + return 0, false + } + return k + k1, true + case *AFSDB: + return compressionLenSearch(c, x.Hostname) + } + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
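Reviewer's note: a short sketch showing that Copy/CopyTo (defined here and just below) deep-copy the record sections, so the copy can be mutated independently of the original. Editorial illustration, not part of the vendored file.

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion("example.org.", dns.TypeA)
        rr, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
        if err != nil {
            log.Fatal(err)
        }
        m.Answer = append(m.Answer, rr)

        c := m.Copy()
        c.Answer[0].Header().Ttl = 60 // only the copy changes

        fmt.Println(m.Answer[0].Header().Ttl, c.Answer[0].Header().Ttl) // 3600 60
    }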
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 00000000..35786f22 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,340 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. 
The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) + fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) +if err != nil { + return off, err +} +headerEnd := off +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + fallthrough + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + fallthrough + case st.Tag(i) == `dns:"base32"`: + o("off, err 
= packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3 + o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n") + continue + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + // We have packed everything, only now we know the rdlength of this RR + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) + fmt.Fprintf(b, "rr := new(%s)\n", name) + fmt.Fprint(b, "rr.Hdr = h\n") + fmt.Fprint(b, `if noRdata(h) { +return rr, off, nil + } +var err error +rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return rr, off, nil + } +`) + } + } + fmt.Fprintf(b, "return rr, off, err }\n\n") + } + // Generate typeToUnpack map + fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") + for _, name := range namedTypes { + if name == "RFC3597" { + continue + } + fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) + } + fmt.Fprintln(b, "}\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
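Reviewer's note: the generator above keys its pack/unpack code off the `dns:"..."` struct tags on the RR types. The sketch below prints those tags for the SOA type with plain reflection; it is an editorial illustration, not part of the vendored file, and it assumes the tag layout used by the package's RR definitions (the two name fields tagged cdomain-name, the counters untagged).

    package main

    import (
        "fmt"
        "reflect"

        "github.com/miekg/dns"
    )

    func main() {
        t := reflect.TypeOf(dns.SOA{})
        for i := 1; i < t.NumField(); i++ { // field 0 is the RR header
            f := t.Field(i)
            fmt.Printf("%-8s %-8s dns:%q\n", f.Name, f.Type, f.Tag.Get("dns"))
        }
        // The untagged uint32 fields fall into the generator's plain-integer branch;
        // the tagged fields select the matching pack/unpack helper.
    }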
+func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. +func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 00000000..e7a9500c --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,630 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. 
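Reviewer's note: a sketch of the behaviour described above for the A-record packer: net.ParseIP hands back a 16-byte slice even for IPv4 addresses, and only the 4 significant bytes end up on the wire. Editorial illustration, not part of the vendored file; the dns.A type and TypeA/ClassINET constants come from the package's type definitions, which are not in this hunk.

    package main

    import (
        "fmt"
        "log"
        "net"

        "github.com/miekg/dns"
    )

    func main() {
        a := &dns.A{
            Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 3600},
            A:   net.ParseIP("192.0.2.1"), // 16-byte form
        }

        buf := make([]byte, 64)
        off, err := dns.PackRR(a, buf, 0, nil, false)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(a.A), a.Hdr.Rdlength, off) // 16 4 27

        rr, _, err := dns.UnpackRR(buf[:off], 0)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(rr.String()) // example.org. 3600 IN A 192.0.2.1
    }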
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, nil +} + +// pack packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. +func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if off == len(msg) { + return off, nil + } + + off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rdlength, msg, off) + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +func fromBase32(s []byte) (buf []byte, err error) { + buflen := base32.HexEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32.HexEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
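Reviewer's note: a sketch that walks the fixed RR header fields of a packed record in the same order RR_Header.pack writes them and unpackHeader reads them back (owner name, type, class, TTL, rdlength). Editorial illustration, not part of the vendored file; only exported API from this hunk plus encoding/binary is used.

    package main

    import (
        "encoding/binary"
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
        if err != nil {
            log.Fatal(err)
        }
        buf := make([]byte, 128)
        end, err := dns.PackRR(rr, buf, 0, nil, false)
        if err != nil {
            log.Fatal(err)
        }

        name, off, err := dns.UnpackDomainName(buf[:end], 0)
        if err != nil {
            log.Fatal(err)
        }
        rrtype := binary.BigEndian.Uint16(buf[off:])
        class := binary.BigEndian.Uint16(buf[off+2:])
        ttl := binary.BigEndian.Uint32(buf[off+4:])
        rdlength := binary.BigEndian.Uint16(buf[off+8:])
        fmt.Println(name, rrtype, class, ttl, rdlength) // example.org. 15 1 3600 20
    }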
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + case '\t', '\r', '\n': + s = append(s, b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := 
packTxtString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
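+ // (each of the up to 255 payload octets may be rendered in its escaped
+ // \DDD form, i.e. four characters per octet, so 256*4+1 comfortably
+ // covers the worst case)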
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET, EDNS0SUBNETDRAFT: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + if code == EDNS0SUBNETDRAFT { + e.DraftOption = true + } + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= 
lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 00000000..6f10f3e6 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,119 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "io" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
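+// The salt is given as a hex string, as it appears in the NSEC3/NSEC3PARAM record.
+// A minimal illustration (the hash algorithm, iteration count and salt are example
+// values only):
+//
+//	hashed := HashName("example.org.", SHA1, 2, "DEAD")
+//
+// hashed is the Base32 (Extended Hex alphabet) string that forms the first label of
+// the matching NSEC3 owner name.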
+func HashName(label string, ha uint8, iter uint16, salt string) string { + saltwire := new(saltWireFmt) + saltwire.Salt = salt + wire := make([]byte, DefaultMsgSize) + n, err := packSaltWire(saltwire, wire) + if err != nil { + return "" + } + wire = wire[:n] + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + var s hash.Hash + switch ha { + case SHA1: + s = sha1.New() + default: + return "" + } + + // k = 0 + name = append(name, wire...) + io.WriteString(s, string(name)) + nsec3 := s.Sum(nil) + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + nsec3 = append(nsec3, wire...) + io.WriteString(s, string(nsec3)) + nsec3 = s.Sum(nil) + } + return toBase32(nsec3) +} + +// Denialer is an interface that should be implemented by types that are used to denial +// answers in DNSSEC. +type Denialer interface { + // Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3. + Cover(name string) bool + // Match will check if the ownername matches the (unhashed) name for this NSEC3 or NSEC3. + Match(name string) bool +} + +// Cover implements the Denialer interface. +func (rr *NSEC) Cover(name string) bool { + return true +} + +// Match implements the Denialer interface. +func (rr *NSEC) Match(name string) bool { + return true +} + +// Cover implements the Denialer interface. +func (rr *NSEC3) Cover(name string) bool { + // FIXME(miek): check if the zones match + // FIXME(miek): check if we're not dealing with parent nsec3 + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { + return false + } + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot + if hash == rr.NextDomain { + return false // empty interval + } + if hash > rr.NextDomain { // last name, points to apex + // hname > hash + // hname > rr.NextDomain + // TODO(miek) + } + if hname <= hash { + return false + } + if hname >= rr.NextDomain { + return false + } + return true +} + +// Match implements the Denialer interface. +func (rr *NSEC3) Match(name string) bool { + // FIXME(miek): Check if we are in the same zone + hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + labels := Split(rr.Hdr.Name) + if len(labels) < 2 { + return false + } + hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the . + if hash == hname { + return true + } + return false +} + +func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { + off, err := packStringHex(sw.Salt, msg, 0) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 00000000..6b08e6e9 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,149 @@ +package dns + +import ( + "fmt" + "strings" +) + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. 
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + newh := r.Hdr.copyHeader() + rr.Hdr = *newh + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. 
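+// It undoes an earlier PrivateHandle call. A minimal sketch of the round trip (the
+// mnemonic "EXAMPLEPRIV", the code point 0xFF99 from the RFC 6895 private-use range
+// and the rdata type are hypothetical):
+//
+//	PrivateHandle("EXAMPLEPRIV", 0xFF99, func() PrivateRdata { return new(exampleRdata) })
+//	defer PrivateHandleRemove(0xFF99)
+//
+// where exampleRdata is any type implementing the PrivateRdata interface.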
+func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 00000000..6e21fba7 --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. + off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 00000000..099dac94 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// Map of opcodes strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// Map of rcodes strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 00000000..b489f3f0 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporay. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. 
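+ // (when the same RR occurs more than once with different TTLs, the copy that
+ // is kept ends up with the lowest TTL seen)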
+ if m[key].Header().Ttl > r.Header().Ttl { + m[key].Header().Ttl = r.Header().Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. + if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainNameTTLCLASSTYPERDATA to: +// lowercasenameCLASSTYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainnameTTL... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 00000000..d34597ba --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,981 @@ +package dns + +import ( + "io" + "log" + "os" + "strconv" + "strings" +) + +type debugging bool + +const debug debugging = false + +func (d debugging) Printf(format string, args ...interface{}) { + if d { + log.Printf(format, args...) + } +} + +const maxTok = 2048 // Largest token we can return. +const maxUint16 = 1<<16 - 1 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTtl // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTtl // Expect rrtype or class + zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTtlBl // Space after directive $TTL + zExpectDirTtl // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. 
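+// The file name is empty when the input did not come from a file (for example when
+// parsing a single RR from a string with NewRR); in that case Error omits the file prefix.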
+type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + r := <-parseZoneHelper(q, ".", filename, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, which consist out the parsed RR, a potential comment or an error. +// If there is an error the RR is nil. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file would start with: $ORIGIN origin . +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too. 
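+// Note that the tokens are produced by a separate goroutine writing to a buffered
+// channel; a caller that stops reading before the channel is closed can leave that
+// goroutine blocked once the buffer fills, so drain the channel to completion.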
+func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin == "" { + origin = "." + } + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var defttl uint32 = defaultTtl + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + h.Ttl = defttl + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + if l.token[0] == '@' { + h.Name = origin + prevName = h.Name + st = zExpectOwnerBl + break + } + if h.Name[l.length-1] != '.' { + h.Name = appendOrigin(h.Name, origin) + } + _, ok := IsDomainName(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + prevName = h.Name + st = zExpectOwnerBl + case zDirTtl: + st = zExpectDirTtlBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // Don't about the defttl, we should take the $TTL value + // defttl = ttl + st = zExpectAnyNoTtlBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + l := <-c + switch l.value { + case zBlank: + l := <-c + if l.value == zString { + if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + // a new origin is specified. + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. 
endings + neworigin = l.token + "." + origin + } else { + neworigin = l.token + origin + } + } else { + neworigin = l.token + } + } + case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + r1, e1 := os.Open(l.token) + if e1 != nil { + t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, l.token, neworigin, t, include+1) + st = zExpectOwnerDir + case zExpectDirTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTtl + case zExpectDirTtl: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = ttl + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + if _, ok := IsDomainName(l.token); !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + origin = l.token + "." + origin + } else { + origin = l.token + origin + } + } else { + origin = l.token + } + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the defttl here + st = zExpectAnyNoTtlBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTtl + case zExpectAnyNoTtl: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case 
zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the def ttl anymore + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTtl + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + debug.Printf("[7 %+v]", l.token) + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + debug.Printf("[6 %+v]", l.token) + c <- l + } + stri = 0 + // I reverse space stuff here + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + debug.Printf("[5 %+v]", l.token) + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + debug.Printf("[4 %+v]", l.token) + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + l.comment = string(com[:comi]) + debug.Printf("[3 %+v %+v]", l.token, l.comment) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + debug.Printf("[2 %+v]", l.token) + c <- l + } + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + debug.Printf("[1 %+v]", l.token) + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = 
strings.ToUpper(l.token) + l.length = stri + + debug.Printf("[%+v]", l.token) + c <- l + stri = 0 + } + + // send quote itself as separate token + l.value = zQuote + l.token = "\"" + l.tokenUpper = l.token + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.tokenUpper = l.token + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + l.value = zString + debug.Printf("[%+v]", l.token) + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, ok := strconv.Atoi(token[offset:]) + if ok != nil || class > maxUint16 { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, ok := strconv.Atoi(token[offset:]) + if ok != nil || typ > maxUint16 { + return 0, false + } + return uint16(typ), true +} + +// Parse things like 2w, 2m, etc, Return the time in seconds. +func stringToTtl(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." 
+ origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 00000000..675fc80d --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2179 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have Hexadecimal or Base64 as their last element in the Rdata. Records + // that have a fixed ending or for instance A, AAAA, SOA and etc. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// an *ParseError: garbage after rdata. 
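+// Types without a registered parser fall through to setRFC3597, which accepts the
+// RFC 3597 unknown-RR presentation syntax (\# <length> <hex data>).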
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC3957 RR (Unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// A remainder of the rdata with embedded spaces, return the parsed string slice (sans the spaces) +// or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + quote := false + l := <-c + var s []string + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value == zQuote { + case true: // A number of quoted string + s = make([]string, 0) + empty := true + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + case false: // Unquoted text record + s = make([]string, 1) + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + s[0] += l.token + l = <-c + } + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ns = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' 
{ + rr.Ns = appendOrigin(rr.Ns, o) + } + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' { + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + l = <-c + rr.Txt = l.token + if l.token == "@" { + rr.Txt = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + if rr.Txt[l.length-1] != '.' { + rr.Txt = appendOrigin(rr.Txt, o) + } + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + if rr.Mr[l.length-1] != '.' { + rr.Mr = appendOrigin(rr.Mr, o) + } + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mb = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + if rr.Mb[l.length-1] != '.' { + rr.Mb = appendOrigin(rr.Mb, o) + } + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mg = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + if rr.Mg[l.length-1] != '.' { + rr.Mg = appendOrigin(rr.Mg, o) + } + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? 
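+ // Some zone files give HINFO as a single quoted string; if it contains
+ // whitespace, split it into the CPU and OS fields, otherwise leave OS empty.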
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Rmail = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + if rr.Rmail[l.length-1] != '.' { + rr.Rmail = appendOrigin(rr.Rmail, o) + } + } + <-c // zBlank + l = <-c + rr.Email = l.token + if l.token == "@" { + rr.Email = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + if rr.Email[l.length-1] != '.' { + rr.Email = appendOrigin(rr.Email, o) + } + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mf = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + if rr.Mf[l.length-1] != '.' { + rr.Mf = appendOrigin(rr.Mf, o) + } + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Md = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + if rr.Md[l.length-1] != '.' { + rr.Md = appendOrigin(rr.Md, o) + } + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + if l.token == "@" { + rr.Mx = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + if rr.Mx[l.length-1] != '.' { + rr.Mx = appendOrigin(rr.Mx, o) + } + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Host = l.token + if l.token == "@" { + rr.Host = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + if rr.Host[l.length-1] != '.' 
{ + rr.Host = appendOrigin(rr.Host, o) + } + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + if l.token == "@" { + rr.Hostname = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + if rr.Hostname[l.length-1] != '.' { + rr.Hostname = appendOrigin(rr.Hostname, o) + } + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + if l.token == "@" { + rr.Exchanger = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + if rr.Exchanger[l.length-1] != '.' { + rr.Exchanger = appendOrigin(rr.Exchanger, o) + } + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + <-c // zBlank + if l.token == "@" { + rr.Ns = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } + } + + l = <-c + rr.Mbox = l.token + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.Atoi(l.token); e != nil { + if i == 0 { + // Serial should be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if v, ok = stringToTtl(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Target = l.token + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, 
&ParseError{f, "bad NAPTR Regexp", l}, "" + } + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + if l.token == "@" { + rr.Replacement = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + if rr.Replacement[l.length-1] != '.' { + rr.Replacement = appendOrigin(rr.Replacement, o) + } + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.PreviousName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + if rr.PreviousName[l.length-1] != '.' { + rr.PreviousName = appendOrigin(rr.PreviousName, o) + } + } + <-c // zBlank + l = <-c + rr.NextName = l.token + if l.token == "@" { + rr.NextName = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + if rr.NextName[l.length-1] != '.' { + rr.NextName = appendOrigin(rr.NextName, o) + } + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + // North + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.Atoi(l.token); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + 
+Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + if l.token == "@" { + xs = append(xs, o) + l = <-c + continue + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + if l.token[l.length-1] != '.' 
{ + l.token = appendOrigin(l.token, o) + } + xs = append(xs, l.token) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.Atoi(l.token); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + <-c // zBlank + l = <-c + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + <-c // zBlank + l = <-c + i, err = strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l 
= <-c + rr.SignerName = l.token + if l.token == "@" { + rr.SignerName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + if rr.SignerName[l.length-1] != '.' { + rr.SignerName = appendOrigin(rr.SignerName, o) + } + } + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = l.token + if l.length == 0 { + return rr, nil, l.comment + } + if l.token == "@" { + rr.NextDomain = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + if rr.NextDomain[l.length-1] != '.' { + rr.NextDomain = appendOrigin(rr.NextDomain, o) + } + } + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { 
+ return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) 
(RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil 
{ + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.Atoi(l.token); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SMIMEA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. 
different than e), because...??t + s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RFC3597) + rr.Hdr = h + l := <-c + if l.token != "\\#" { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + <-c // zBlank + l = <-c + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + } + + s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + if e1 != nil { + return nil, e1, c1 + } + if rdlength*2 != len(s) { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + rr.Rdata = s + return rr, nil, c1 +} + +func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SPF) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TXT) + rr.Hdr = h + + // no zBlank reading here, because all this rdata is TXT + s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +// identical to setTXT +func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) + if e != nil { + return nil, e, "" + } + rr.ZSData = s + return rr, nil, c1 +} + +func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(URI) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! 
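+	// Nothing to decode at this stage: the remaining tokens on the line are
+	// gathered verbatim into Digest by endingToString.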
+ rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Fqdn = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + if rr.Fqdn[l.length-1] != '.' 
{ + rr.Fqdn = appendOrigin(rr.Fqdn, o) + } + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, "" + } + rr.Uinfo = s[0] // silently discard anything above + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Map822 = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + if rr.Map822[l.length-1] != '.' { + rr.Map822 = appendOrigin(rr.Map822, o) + } + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + if l.token == "@" { + rr.Mapx400 = o + return rr, nil, "" + } + _, ok = IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + if rr.Mapx400[l.length-1] != '.' 
{ + rr.Mapx400 = appendOrigin(rr.Mapx400, o) + } + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, err := strconv.Atoi(l.token) + if err != nil || l.err { + return nil, &ParseError{f, "bad CAA Flag", l}, "" + } + rr.Flag = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.value != zString { + return nil, &ParseError{f, "bad CAA Tag", l}, "" + } + rr.Tag = l.token + + <-c // zBlank + s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + if e != nil { + return nil, e, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad CAA Value", l}, "" + } + rr.Value = s[0] + return rr, nil, c1 +} + +var typeToparserFunc = map[uint16]parserFunc{ + TypeAAAA: {setAAAA, false}, + TypeAFSDB: {setAFSDB, false}, + TypeA: {setA, false}, + TypeCAA: {setCAA, true}, + TypeCDS: {setCDS, true}, + TypeCDNSKEY: {setCDNSKEY, true}, + TypeCERT: {setCERT, true}, + TypeCNAME: {setCNAME, false}, + TypeDHCID: {setDHCID, true}, + TypeDLV: {setDLV, true}, + TypeDNAME: {setDNAME, false}, + TypeKEY: {setKEY, true}, + TypeDNSKEY: {setDNSKEY, true}, + TypeDS: {setDS, true}, + TypeEID: {setEID, true}, + TypeEUI48: {setEUI48, false}, + TypeEUI64: {setEUI64, false}, + TypeGID: {setGID, false}, + TypeGPOS: {setGPOS, false}, + TypeHINFO: {setHINFO, true}, + TypeHIP: {setHIP, true}, + TypeKX: {setKX, false}, + TypeL32: {setL32, false}, + TypeL64: {setL64, false}, + TypeLOC: {setLOC, true}, + TypeLP: {setLP, false}, + TypeMB: {setMB, false}, + TypeMD: {setMD, false}, + TypeMF: {setMF, false}, + TypeMG: {setMG, false}, + TypeMINFO: {setMINFO, false}, + TypeMR: {setMR, false}, + TypeMX: {setMX, false}, + TypeNAPTR: {setNAPTR, false}, + TypeNID: {setNID, false}, + TypeNIMLOC: {setNIMLOC, true}, + TypeNINFO: {setNINFO, true}, + TypeNSAPPTR: {setNSAPPTR, false}, + TypeNSEC3PARAM: {setNSEC3PARAM, false}, + TypeNSEC3: {setNSEC3, true}, + TypeNSEC: {setNSEC, true}, + TypeNS: {setNS, false}, + TypeOPENPGPKEY: {setOPENPGPKEY, true}, + TypePTR: {setPTR, false}, + TypePX: {setPX, false}, + TypeSIG: {setSIG, true}, + TypeRKEY: {setRKEY, true}, + TypeRP: {setRP, false}, + TypeRRSIG: {setRRSIG, true}, + TypeRT: {setRT, false}, + TypeSMIMEA: {setSMIMEA, true}, + TypeSOA: {setSOA, false}, + TypeSPF: {setSPF, true}, + TypeSRV: {setSRV, false}, + TypeSSHFP: {setSSHFP, true}, + TypeTALINK: {setTALINK, false}, + TypeTA: {setTA, true}, + TypeTLSA: {setTLSA, true}, + TypeTXT: {setTXT, true}, + TypeUID: {setUID, false}, + TypeUINFO: {setUINFO, true}, + TypeURI: {setURI, true}, + TypeX25: {setX25, false}, +} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go new file mode 100644 index 00000000..c29bc2f3 --- /dev/null +++ b/vendor/github.com/miekg/dns/scanner.go @@ -0,0 +1,43 @@ +package dns + +// Implement a simple scanner, return a byte stream from an io reader. + +import ( + "bufio" + "io" + "text/scanner" +) + +type scan struct { + src *bufio.Reader + position scanner.Position + eof bool // Have we just seen a eof +} + +func scanInit(r io.Reader) *scan { + s := new(scan) + s.src = bufio.NewReader(r) + s.position.Line = 1 + return s +} + +// tokenText returns the next byte from the input +func (s *scan) tokenText() (byte, error) { + c, err := s.src.ReadByte() + if err != nil { + return c, err + } + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
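+	// In other words, the line counter is only advanced once the byte *after*
+	// the newline has been read.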
+ if s.eof == true { + s.position.Line++ + s.position.Column = 0 + s.eof = false + } + if c == '\n' { + s.eof = true + return c, nil + } + s.position.Column++ + return c, nil +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 00000000..1d40ee56 --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,734 @@ +// DNS server implementation. + +package dns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "sync" + "time" +) + +// Maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// A ResponseWriter interface is used by an DNS handler to +// construct an DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +type response struct { + hijacked bool // connection has been hijacked by handler + tsigStatus error + tsigTimersOnly bool + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + remoteAddr net.Addr // address of the client + writer Writer // writer to output the raw DNS bits +} + +// ServeMux is an DNS request multiplexer. It matches the +// zone name of each incoming request against a list of +// registered patterns add calls the handler for the pattern +// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning +// that queries for the DS record are redirected to the parent zone (if that +// is also registered), otherwise the child gets the query. +// ServeMux is also safe for concurrent access from multiple goroutines. +type ServeMux struct { + z map[string]Handler + m *sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. 
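+// HandleFailed itself has the HandlerFunc signature, so it can be registered
+// directly; an illustrative catch-all registration would be
+// dns.HandleFunc(".", dns.HandleFailed).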
+func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +func failedHandler() Handler { return HandlerFunc(HandleFailed) } + +// ListenAndServe Starts a server on address and network specified Invoke handler +// for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd, +// l and p should not both be non-nil. +// If both l and p are not nil only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + var handler Handler + b := make([]byte, len(q)) // worst case, one label of length q + off := 0 + end := false + for { + l := len(q[off:]) + for i := 0; i < l; i++ { + b[i] = q[off+i] + if b[i] >= 'A' && b[i] <= 'Z' { + b[i] |= ('a' - 'A') + } + } + if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegeate to the parent + handler = h + } + off, end = NextLabel(q, off) + if end { + break + } + } + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregistrars the handler specific for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose +// pattern most closely matches the request message. If DefaultServeMux +// is used the correct thing for DS queries is done: a possible parent +// is sought. +// If no handler is found a standard SERVFAIL message is returned +// If the request message does not have exactly one question in the +// question section a SERVFAIL is returned, unlesss Unsafe is true. 
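+// In the current implementation the check is loose: only a message with an
+// empty question section is rejected, while additional questions are passed
+// through to the matched handler (see the "allow more than one question"
+// note below).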
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { + var h Handler + if len(request.Question) < 1 { // allow more than one question + h = failedHandler() + } else { + if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { + h = failedHandler() + } + } + h.ServeDNS(w, request) +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. +type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. +type defaultReader struct { + *Server +} + +func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). 
+ IdleTimeout func() time.Duration + // Secret(s) for Tsig map[]. + TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specifically not check if the query has the QR bit not set. + Unsafe bool + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + + // Graceful shutdown handling + + inFlight sync.WaitGroup + + lock sync.RWMutex + started bool +} + +// ListenAndServe starts a nameserver on the configured address in *Server. +func (srv *Server) ListenAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + switch srv.Net { + case "tcp", "tcp4", "tcp6": + a, err := net.ResolveTCPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenTCP(srv.Net, a) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "tcp-tls", "tcp4-tls", "tcp6-tls": + network := "tcp" + if srv.Net == "tcp4-tls" { + network = "tcp4" + } else if srv.Net == "tcp6" { + network = "tcp6" + } + + l, err := tls.Listen(network, addr, srv.TLSConfig) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "udp", "udp4", "udp6": + a, err := net.ResolveUDPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenUDP(srv.Net, a) + if err != nil { + return err + } + if e := setUDPSocketOptions(l); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + srv.lock.Unlock() + err = srv.serveUDP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. All in progress queries are completed before the server +// is taken down. If the Shutdown is taking longer than the reading timeout an error +// is returned. 
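+// The wait for in-flight queries is bounded by getReadTimeout, i.e.
+// ReadTimeout when set and the package default dnsTimeout otherwise.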
+func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + + fin := make(chan bool) + go func() { + srv.inFlight.Wait() + fin <- true + }() + + select { + case <-time.After(srv.getReadTimeout()): + return &Error{err: "server shutdown is pending"} + case <-fin: + return nil + } +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + rw, err := l.Accept() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + m, err := reader.ReadTCP(rw, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) + } +} + +// serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) + } +} + +// Serve a new connection. 
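+// For TCP connections the loop below answers up to maxTCPQueries requests on
+// the same connection, re-reading with the configured idle timeout; when
+// TsigSecret is set and the request carries a TSIG record, it is verified
+// before the handler runs.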
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + defer srv.inFlight.Done() + + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + q := 0 // counter for the amount of TCP queries we get + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } +Redo: + req := new(Msg) + err := req.Unpack(m) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + goto Exit + } + if !srv.Unsafe && req.Response { + goto Exit + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg + } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + h.ServeDNS(w, req) // Writes back to the client + +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return + } + + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil || n == 0 { + if err != nil { + return nil, nil, err + } + return nil, nil, ErrShortRead + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. 
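+// For TCP the message is prepended with its length as a two-octet field in
+// network byte order, as DNS over TCP requires (RFC 1035, section 4.2.2);
+// messages larger than MaxMsgSize are rejected.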
+func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 00000000..2dce06af --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,219 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + sig := string(signature) + + buf = append(buf, sig...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(sig)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. 
+// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 00000000..9573c7d0 --- /dev/null +++ 
b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 00000000..3a4bb570 --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. +func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email_address string, domain_name string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email_address)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." 
+ domain_name, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 00000000..431e2fb5 --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 00000000..78365e1c --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,384 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n" + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. 
Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. +func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + io.WriteString(h, string(buf)) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. 
+ now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. + } + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
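In practice TsigGenerate and TsigVerify are rarely called directly: a client attaches a stub TSIG with Msg.SetTsig and supplies the secret through Client.TsigSecret, and the transport code does the signing and verification. A sketch, assuming those exported helpers (not part of this hunk), with a made-up key name and secret and imports for log, time and the dns package omitted:

    m := new(dns.Msg)
    m.SetQuestion("example.org.", dns.TypeSOA)
    // Append a stub TSIG RR; the MAC is filled in by TsigGenerate on send.
    m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())

    c := new(dns.Client)
    // Base64 secret keyed by the fully qualified key name; the reply's TSIG
    // is checked with TsigVerify using the same map.
    c.TsigSecret = map[string]string{"axfr.example.org.": "c2VjcmV0IHNlY3JldCBzZWNyZXQ="}

    in, _, err := c.Exchange(m, "192.0.2.1:53")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("rcode %d, tsig present: %v", in.Rcode, in.IsTsig() != nil)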
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 00000000..f63a18b3 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1294 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
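Type, Class and Name above are thin named types whose main job is to print numeric values as their mnemonics; the String methods backing that live elsewhere in the package. A tiny sketch, assuming those exported String methods and an fmt import:

    fmt.Println(dns.Type(dns.TypeAAAA))   // "AAAA"
    fmt.Println(dns.Class(dns.ClassINET)) // "IN"
    fmt.Println(dns.Type(65534))          // unknown types print as "TYPE65534"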
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes. + RcodeSuccess = 0 + RcodeFormatError = 1 + RcodeServerFailure = 2 + RcodeNameError = 3 + RcodeNotImplemented = 4 + RcodeRefused = 5 + RcodeYXDomain = 6 + RcodeYXRrset = 7 + RcodeNXRrset = 8 + RcodeNotAuth = 9 + RcodeNotZone = 10 + RcodeBadSig = 16 // TSIG + RcodeBadVers = 16 // EDNS0 + RcodeBadKey = 17 + RcodeBadTime = 18 + RcodeBadMode = 19 // TKEY + RcodeBadName = 20 + RcodeBadAlg = 21 + RcodeBadTrunc = 22 // TSIG + RcodeBadCookie = 23 // DNS Cookies + + // Message Opcodes. There is no 3. + OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Headers is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled + + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
+ + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +type RT struct { + Hdr RR_Header + 
Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) 
+ } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '\t': + return append(s, '\\', 't') + case '\r': + return append(s, '\\', 'r') + case '\n': + return append(s, '\\', 'n') + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, maybe a control char + switch b[offset+1] { + case 't': + return '\t', 2 + case 'r': + return '\r', 2 + case 'n': + return '\n', 2 + default: + return b[offset+1], 2 + } +} + +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// The CERT resource record, see RFC 4398. +type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// The DNAME resource record, see RFC 2672. 
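The String methods above (and the sprint helpers) render records in presentation format; dns.NewRR, the package's zone-file parser entry point (defined elsewhere, not in this hunk), goes the other way. A short sketch, imports omitted:

    rr, err := dns.NewRR("_sip._udp.example.org. 3600 IN SRV 10 5 5060 sip.example.org.")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(rr) // re-rendered through (*SRV).String above
    if srv, ok := rr.(*dns.SRV); ok {
        fmt.Println(srv.Target, srv.Port) // sip.example.org. 5060
    }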
+type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
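Records can also be built directly from the struct types in this file instead of being parsed from text, for example an A record (a sketch; imports for net, fmt and the dns package omitted):

    a := &dns.A{
        Hdr: dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300},
        A:   net.ParseIP("192.0.2.1"),
    }
    fmt.Println(a) // example.org.	300	IN	A	192.0.2.1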
+type SIG struct { + RRSIG +} + +type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type DLV struct { + DS +} + +type CDS struct { + DS +} + +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +type KEY struct { + DNSKEY +} + +type CDNSKEY struct { + DNSKEY +} + +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type NSAPPTR struct { + Hdr RR_Header + Ptr string 
`dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string + OtherLen uint16 + OtherData string +} + +func (rr *TKEY) String() string { + // It has no presentation format + return "" +} + +// RFC3597 represents an unknown/generic RR. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +type SMIMEA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *SMIMEA) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + // Every Nth char needs a space on this output. If we output + // this as one giant line, we can't read it can in because in some cases + // the cert length overflows scan.maxTok (2048). 
+ sx := splitN(rr.Certificate, 1024) // conservative value here + s += " " + strings.Join(sx, " ") + return s +} + +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. 
+// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. +func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// SplitN splits a string into N sized string chunks. +// This might become an exported function once. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 00000000..bf80da32 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,271 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, +} + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +// Header() functions +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`)) + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len()\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: + o("l += len(rr.%s) + 1\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("l += len(rr.%s)/2 + 1\n") + case st.Tag(i) == `dns:"a"`: + o("l += net.IPv4len // %s\n") + case st.Tag(i) == `dns:"aaaa"`: + o("l += net.IPv6len // %s\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("l += 1 // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"*rr.Hdr.copyHeader()"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 00000000..c79c6c88 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,58 @@ +// +build !windows,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + sa, err := getUDPSocketName(conn) + if err != nil { + return err + } + switch sa.(type) { + case *syscall.SockaddrInet6: + v6only, err := getUDPSocketOptions6Only(conn) + if err != nil { + return err + } + setUDPSocketOptions6(conn) + if !v6only { + setUDPSocketOptions4(conn) + } + case *syscall.SockaddrInet4: + setUDPSocketOptions4(conn) + } + return nil +} + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. 
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go new file mode 100644 index 00000000..c62d2188 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_linux.go @@ -0,0 +1,73 @@ +// +build linux + +package dns + +// See: +// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and +// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ +// +// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing +// interface, this might not always be the correct one. This code will make sure the egress +// packet's interface matched the ingress' one. + +import ( + "net" + "syscall" +) + +// setUDPSocketOptions4 prepares the v4 socket for sessions. +func setUDPSocketOptions4(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { + return err + } + // Calling File() above results in the connection becoming blocking, we must fix that. + // See https://github.com/miekg/dns/issues/279 + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// setUDPSocketOptions6 prepares the v6 socket for sessions. +func setUDPSocketOptions6(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { + return err + } + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + return nil +} + +// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined +// (dualstack). +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { + file, err := conn.File() + if err != nil { + return false, err + } + // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections + v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) + if err != nil { + return false, err + } + return v6only == 1, nil +} + +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { + file, err := conn.File() + if err != nil { + return nil, err + } + return syscall.Getsockname(int(file.Fd())) +} diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go new file mode 100644 index 00000000..d4073244 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_other.go @@ -0,0 +1,17 @@ +// +build !linux,!plan9 + +package dns + +import ( + "net" + "syscall" +) + +// These do nothing. See udp_linux.go for an example of how to implement this. + +// We tried to adhire to some kind of naming scheme. 
+ +func setUDPSocketOptions4(conn *net.UDPConn) error { return nil } +func setUDPSocketOptions6(conn *net.UDPConn) error { return nil } +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil } +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil } diff --git a/vendor/github.com/miekg/dns/udp_plan9.go b/vendor/github.com/miekg/dns/udp_plan9.go new file mode 100644 index 00000000..b794deeb --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_plan9.go @@ -0,0 +1,34 @@ +package dns + +import ( + "net" +) + +func setUDPSocketOptions(conn *net.UDPConn) error { return nil } + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 00000000..2ce4b330 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,34 @@ +// +build windows + +package dns + +import "net" + +type SessionUDP struct { + raddr *net.UDPAddr +} + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + session := &SessionUDP{raddr.(*net.UDPAddr)} + return n, session, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, err := conn.WriteTo(b, session.raddr) + return n, err +} + +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + return nil +} diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 00000000..e90c5c96 --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,106 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. RFC 2136 section 2.4.5. 
+func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. +func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = ClassNONE + r.Header().Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 00000000..7346deff --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,244 @@ +package dns + +import ( + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +} + +// A Transfer defines parameters that are used during a zone transfer. 
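// Editor's note — illustrative sketch, not part of the vendored files. The
// RFC 2136 helpers in update.go above only populate the prerequisite (Answer)
// and update (Ns) sections; the question section must already name the zone,
// since Used and Insert copy their class from it and panic when it is empty.
// Assuming the vendored package is imported as "github.com/miekg/dns", a
// minimal dynamic update message could be built like this (the package's
// SetUpdate convenience in defaults.go, not shown here, wraps the first three
// lines):
//
//	m := new(dns.Msg)
//	m.Opcode = dns.OpcodeUpdate
//	m.Question = []dns.Question{{Name: "example.org.", Qtype: dns.TypeSOA, Qclass: dns.ClassINET}}
//	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
//	if err != nil {
//		log.Fatal(err)
//	}
//	m.NameNotUsed([]dns.RR{rr}) // prerequisite: the name must not exist yet
//	m.Insert([]dns.RR{rr})      // update: add the A record to the zone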
+type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be fully qualified + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + if err := t.WriteMsg(q); err != nil { + return nil, err + } + env = make(chan *Envelope) + go func() { + if q.Question[0].Qtype == TypeAXFR { + go t.inAxfr(q.Id, env) + return + } + if q.Question[0].Qtype == TypeIXFR { + go t.inIxfr(q.Id, env) + return + } + }() + return env, nil +} + +func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. 
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + // A single SOA RR signals "no changes" + if len(in.Answer) == 1 && isSOAFirst(in) { + c <- &Envelope{in.Answer, nil} + return + } + + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + first = !first + } + + // Now we need to check each message for SOA records, to see what we need to do + if !first { + t.tsigTimersOnly = true + // If the last record in the IXFR contains the servers' SOA, we should quit + if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { + if v.Serial == serial { + c <- &Envelope{in.Answer, nil} + return + } + } + c <- &Envelope{in.Answer, nil} + } + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. 
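// Editor's note — illustrative sketch, not part of the vendored files. In()
// above hands back a channel of Envelopes and closes it from inAxfr/inIxfr when
// the transfer completes or fails, so the caller only has to drain the channel
// and check Envelope.Error. Complementing the dialing example in In()'s own
// comment, a hypothetical AXFR client loop could look like this (SetAxfr is the
// question-building helper from the package's defaults.go, not shown here):
//
//	tr := new(dns.Transfer)
//	q := new(dns.Msg)
//	q.SetAxfr("example.org.")
//	ch, err := tr.In(q, "ns1.example.org:53")
//	if err != nil {
//		log.Fatal(err) // dial or write failed
//	}
//	for env := range ch {
//		if env.Error != nil {
//			log.Println(env.Error) // e.g. ErrSoa, ErrId or a read error
//			break
//		}
//		for _, rr := range env.RR {
//			fmt.Println(rr)
//		}
//	}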
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 00000000..c561370e --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3529 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + 
headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = 
uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err 
= packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = 
packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + 
rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + if rr.Salt == "-" { /* do nothing, empty salt */ + } + if err != nil { + return off, err + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + if rr.Salt == "-" { /* do nothing, empty salt */ + } + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Map822, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mapx400, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + 
return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, 
err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = 
packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, compress) + if err != nil { + return off, err + } + 
off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + rr.Hdr = 
h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, 
error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + 
return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = 
unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, 
err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = 
unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + 
if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = 
UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func 
unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SMIMEA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return 
rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) 
(RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeDHCID: unpackDHCID, + 
TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSMIMEA: unpackSMIMEA, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 00000000..3c052773 --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,842 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return 
new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +// Header() functions +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { 
return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l += 1 // Flag + l += len(rr.Tag) + 1 + l += 
len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // Address + return l +} +func (rr *EUI64) len() int { + l := rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l += 1 // HitLength + l += 1 // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit)/2 + 1 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l += 1 // Version + l += 1 // Size + l += 1 // HorizPre + l += 1 // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) 
len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l += 1 // Hash + l += 1 // Flags + l += 2 // Iterations + l += 1 // SaltLength + l += len(rr.Salt)/2 + 1 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l += 1 // Protocol + l += 1 // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l += 1 // Algorithm + l += 1 // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SMIMEA) len() int { + l := rr.Hdr.len() + l += 1 // Usage + l += 1 // Selector + l += 1 // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l += 1 // Algorithm + l += 1 // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l += 1 // Algorithm + l += 1 // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) + 1 + l += 2 // OtherLen + l += len(rr.OtherData) + 1 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l += 1 // Usage + l += 1 // Selector + l += 1 // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC)/2 + 1 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData)/2 + 1 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 
// Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{*rr.Hdr.copyHeader()} +} +func (rr *CAA) copy() RR { + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DHCID) copy() RR { + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *GID) copy() RR { + return &GID{*rr.Hdr.copyHeader(), rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{*rr.Hdr.copyHeader(), rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{*rr.Hdr.copyHeader(), rr.Md} +} +func (rr *MF) copy() RR { + return &MF{*rr.Hdr.copyHeader(), rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{*rr.Hdr.copyHeader(), rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{*rr.Hdr.copyHeader(), rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{*rr.Hdr.copyHeader(), ZSData} +} +func (rr *NS) copy() RR { + 
return &NS{*rr.Hdr.copyHeader(), rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{*rr.Hdr.copyHeader(), Option} +} +func (rr *PTR) copy() RR { + return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{*rr.Hdr.copyHeader(), Txt} +} +func (rr *SRV) copy() RR { + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{*rr.Hdr.copyHeader(), Txt} +} +func (rr *UID) copy() RR { + return &UID{*rr.Hdr.copyHeader(), rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml 
b/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 7f3fe9a9..00000000 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.4 - -script: - - go test diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 659d6885..00000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. 
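The mapstructure README removed above describes a two-step pattern: decode the raw JSON into a `map[string]interface{}`, read the "type" key, and only then map the values onto a concrete struct. A minimal sketch of that pattern follows; the `Person` struct, its field tag, and the sample JSON are illustrative placeholders taken from the README text, not anything defined in this repository.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is a hypothetical target struct; the tag maps the lower-case
// "name" key from the generic map onto the Name field.
type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so the "type" key can be
	// inspected before committing to a concrete struct.
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		panic(err)
	}

	// Second pass: once the type is known, hand the same map to
	// mapstructure.Decode to populate the matching struct.
	if generic["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```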
diff --git a/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/ncw/swift/.gitignore deleted file mode 100644 index 5cdbab79..00000000 --- a/vendor/github.com/ncw/swift/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*~ -*.pyc -test-env* -junk/ \ No newline at end of file diff --git a/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/ncw/swift/.travis.yml deleted file mode 100644 index bf37e41f..00000000 --- a/vendor/github.com/ncw/swift/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false - -go: - - 1.1.2 - - 1.2.2 - - 1.3.3 - - 1.4.2 - - 1.5.1 - - tip - -script: - - test -z "$(go fmt ./...)" - - go test diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md deleted file mode 100644 index 2cc24cff..00000000 --- a/vendor/github.com/ncw/swift/README.md +++ /dev/null @@ -1,140 +0,0 @@ -Swift -===== - -This package provides an easy to use library for interfacing with -Swift / Openstack Object Storage / Rackspace cloud files from the Go -Language - -See here for package docs - - http://godoc.org/github.com/ncw/swift - -[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift) - -Install -------- - -Use go to install the library - - go get github.com/ncw/swift - -Usage ------ - -See here for full package docs - -- http://godoc.org/github.com/ncw/swift - -Here is a short example from the docs - - import "github.com/ncw/swift" - - // Create a connection - c := swift.Connection{ - UserName: "user", - ApiKey: "key", - AuthUrl: "auth_url", - Domain: "domain", // Name of the domain (v3 auth only) - Tenant: "tenant", // Name of the tenant (v2 auth only) - } - // Authenticate - err := c.Authenticate() - if err != nil { - panic(err) - } - // List all the containers - containers, err := c.ContainerNames(nil) - fmt.Println(containers) - // etc... - -Additions ---------- - -The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface. - -Testing -------- - -To run the tests you can either use an embedded fake Swift server -either use a real Openstack Swift server or a Rackspace Cloud files account. - -When using a real Swift server, you need to set these environment variables -before running the tests - - export SWIFT_API_USER='user' - export SWIFT_API_KEY='key' - export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' - -And optionally these if using v2 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - -And optionally these if using v3 authentication - - export SWIFT_TENANT='TenantName' - export SWIFT_TENANT_ID='TenantId' - export SWIFT_API_DOMAIN_ID='domain id' - export SWIFT_API_DOMAIN='domain name' - -And optionally these if using v3 trust - - export SWIFT_TRUST_ID='TrustId' - -And optionally this if you want to skip server certificate validation - - export SWIFT_AUTH_INSECURE=1 - -And optionally this to configure the connect channel timeout, in seconds - - export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 - -And optionally this to configure the data channel timeout, in seconds - - export SWIFT_DATA_CHANNEL_TIMEOUT=60 - -Then run the tests with `go test` - -License -------- - -This is free software under the terms of MIT license (check COPYING file -included in this package). 
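As a companion to the usage example above, a short hedged sketch of writing and reading an object follows; the container and object names are made up, and `ContainerCreate`, `ObjectPutBytes` and `ObjectGetBytes` are assumed from the package godoc rather than shown in this README:

```go
// Assumes an authenticated swift.Connection named c, created as in the
// usage example above.
if err := c.ContainerCreate("example-container", nil); err != nil {
	panic(err)
}
// Upload a small object with an explicit content type.
if err := c.ObjectPutBytes("example-container", "hello.txt", []byte("hello"), "text/plain"); err != nil {
	panic(err)
}
// Read it back and print it.
data, err := c.ObjectGetBytes("example-container", "hello.txt")
if err != nil {
	panic(err)
}
fmt.Println(string(data))
```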
- -Contact and support -------------------- - -The project website is at: - -- https://github.com/ncw/swift - -There you can file bug reports, ask for help or contribute patches. - -Authors -------- - -- Nick Craig-Wood - -Contributors ------------- - -- Brian "bojo" Jones -- Janika Liiv -- Yamamoto, Hirotaka -- Stephen -- platformpurple -- Paul Querna -- Livio Soares -- thesyncim -- lsowen -- Sylvain Baubeau -- Chris Kastorff -- Dai HaoJun -- Hua Wang -- Fabian Ruff -- Arturo Reuschenbach Puncernau -- Petr Kotek -- Stefan Majewsky -- Cezar Sa Espinola -- Sam Gunaratne -- Richard Scothern diff --git a/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/ncw/swift/notes.txt deleted file mode 100644 index f738552c..00000000 --- a/vendor/github.com/ncw/swift/notes.txt +++ /dev/null @@ -1,55 +0,0 @@ -Notes on Go Swift -================= - -Make a builder style interface like the Google Go APIs? Advantages -are that it is easy to add named methods to the service object to do -specific things. Slightly less efficient. Not sure about how to -return extra stuff though - in an object? - -Make a container struct so these could be methods on it? - -Make noResponse check for 204? - -Make storage public so it can be extended easily? - -Rename to go-swift to match user agent string? - -Reconnect on auth error - 401 when token expires isn't tested - -Make more api compatible with python cloudfiles? - -Retry operations on timeout / network errors? -- also 408 error -- GET requests only? - -Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock - -Add extra headers field to Connection (for via etc) - -Make errors use an error heirachy then can catch them with a type assertion - - Error(...) - ObjectCorrupted{ Error } - -Make a Debug flag in connection for logging stuff - -Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc - -Object range - -Object create, update with X-Delete-At or X-Delete-After - -Large object support -- check uploads are less than 5GB in normal mode? - -Access control CORS? - -Swift client retries and backs off for all types of errors - -Implement net error interface? - -type Error interface { - error - Timeout() bool // Is the error a timeout? - Temporary() bool // Is the error temporary? -} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index 36d1a84d..00000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -cobra.test diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index dc43afd6..00000000 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.3 - - 1.4.2 - - tip -script: - - go test ./... - - go build diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index b1fb0889..00000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,485 +0,0 @@ -# Cobra - -A Commander for modern go CLI interactions - -[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra) - -## Overview - -Cobra is a commander providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. In addition to providing an interface, Cobra -simultaneously provides a controller to organize your application code. - -Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by -providing **fully posix compliant flags** (including short & long versions), -**nesting commands**, and the ability to **define your own help and usage** for any or -all commands. - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. 
The shortest unambiguous string will -suffice. Help is automatically generated and available for the application or -for a specific command using either the help command or the --help flag. - -## Concepts - -Cobra is built on a structure of commands & flags. - -**Commands** represent actions and **Flags** are modifiers for those actions. - -In the following example 'server' is a command and 'port' is a flag. - - hugo server --port=1313 - -### Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above 'server' is the command - -A Command has the following structure: - - type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. - } - -### Flags - -A Flag is a way to modify the behavior of an command. Cobra supports -fully posix compliant flags as well as the go flag package. -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding posix compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with it's corresponding flags, then the -tree is assigned to the commander which is finally executed. - -### Installing -Using Cobra is easy. First use go get to install the latest version -of the library. - - $ go get github.com/spf13/cobra - -Next include cobra in your application. - - import "github.com/spf13/cobra" - -### Create the root command - -The root command represents your binary itself. - -Cobra doesn't require any special constructors. Simply create your commands. - - var HugoCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, - } - -### Create additional commands - -Additional commands can be defined. - - var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, - } - -### Attach command to its parent -In this example we are attaching it to the root, but commands can be attached at any level. - - HugoCmd.AddCommand(versionCmd) - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - - var Verbose bool - var Source string - -There are two different approaches to assign a flag. - -#### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. 
For -global flags assign a flag as a persistent flag on the root. - - HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") - -#### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - - HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") - -### Remove a command from its parent - -Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version. - - mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) - mainlib.RootCmd.AddCommand(versionCmd) - -### Once all commands and flags are defined, Execute the commands - -Execute should be run on the root for clarity, though it can be called on any command. - - HugoCmd.Execute() - -## Example - -In the example below we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - - import( - "github.com/spf13/cobra" - "fmt" - "strings" - ) - - func main() { - - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i:=0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() - } - -For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com) - -## The Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally help will also -support all other commands as input. Say for instance you have a command called -'create' without any additional configuration cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by cobra. Nothing beyond the -command and flag definitions are needed. - - > hugo help - - A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. 
- - Complete documentation is available at http://hugo.spf13.com - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server :: Hugo runs it's own a webserver to render the files - version :: Print the version number of Hugo - check :: Check content in the source directory - benchmark :: Benchmark hugo by building a site a number of times - help [command] :: Help about any command - - Available Flags: - -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ - -D, --build-drafts=false: include content marked as draft - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - -s, --source="": filesystem path to read files relative from - --stepAnalysis=false: display memory and timing of different steps of the program - --uglyurls=false: if true, use /filename.html instead of /filename/ - -v, --verbose=false: verbose output - -w, --watch=false: watch filesystem for changes and recreate as needed - - Use "hugo help [command]" for more information about that command. - - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or you own template for the default command to use. - -The default help command is - - func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) - } - -You can provide your own command, function or template through the following methods. - - command.SetHelpCommand(cmd *Command) - - command.SetHelpFunc(f func(*Command, []string)) - - command.SetHelpTemplate(s string) - -The latter two will also apply to any children commands. - -## Usage - -When the user provides an invalid flag or invalid command Cobra responds by -showing the user the 'usage' - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of it's output. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs it's own a webserver to render the files - version Print the version number of Hugo - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times - help [command] Help about any command - - Available Flags: - -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ - -D, --build-drafts=false: include content marked as draft - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - -s, --source="": filesystem path to read files relative from - --stepAnalysis=false: display memory and timing of different steps of the program - --uglyurls=false: if true, use /filename.html instead of /filename/ - -v, --verbose=false: verbose output - -w, --watch=false: watch filesystem for changes and recreate as needed - -### Defining your own usage -You can provide your own usage function or template for cobra to use. - -The default usage function is - - return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err - } - -Like help the function and template are over ridable through public methods. 
- - command.SetUsageFunc(f func(*Command) error) - - command.SetUsageTemplate(s string) - -## PreRun or PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. When the subcommand is executed it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`. - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My sub command", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - _ = rootCmd.Execute() - fmt.Print("\n") - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - _ = rootCmd.Execute() -} -``` - -## Generating markdown formatted documentation for your command - -Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md) - -## Generating bash completions for your command - -Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible.
Read more about [Bash Completions](bash_completions.md) - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which when called will print -out everything Cobra knows about the flags for each command - -### Example - - command.DebugFlags() - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) - -## License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 204704ef..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,149 +0,0 @@ -# Generating Bash Completions For Your Own cobra.Command - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: - -```go -package main - -import ( - "io/ioutil" - "os" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" -) - -func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` - -That will get you completions of subcommands and flags. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. - -## Creating your own custom functions - -Some more actual code that works in kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? 
-eq 0 ]]; then - return 0 - fi -} - -__custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! - -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pods", "nodes", "services", "replicationControllers" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -nodes pods replicationControllers services -``` - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. 
- -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. diff --git a/vendor/github.com/spf13/cobra/md_docs.md b/vendor/github.com/spf13/cobra/md_docs.md deleted file mode 100644 index 3a0d55ab..00000000 --- a/vendor/github.com/spf13/cobra/md_docs.md +++ /dev/null @@ -1,81 +0,0 @@ -# Generating Markdown Docs For Your Own cobra.Command - -## Generate markdown docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "io/ioutil" - "os" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" - "github.com/spf13/cobra" -) - -func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - cobra.GenMarkdownTree(kubectl, "./") -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate markdown docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` - -```go - out := new(bytes.Buffer) - cobra.GenMarkdown(cmd, out) -``` - -This will write the markdown doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { - //... -} -``` - -```go -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. 
A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index c4d88e37..00000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -sudo: false - -language: go - -go: - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index f7d63500..00000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,191 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/ogier/pflag - -Run tests by running: - - go test github.com/ogier/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/ogier/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. 
If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Command line flag syntax - -``` ---flag // boolean flags only ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - -``` -// boolean flags --f --abc - -// non-boolean flags --n 1234 --Ifile - -// mixed --abcs "hello" --abcn1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -```go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -```go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through Go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/ogier/pflag][2] after -installation.
- -[2]: http://localhost:6060/pkg/github.com/ogier/pflag -[3]: http://godoc.org/github.com/ogier/pflag diff --git a/vendor/github.com/stevvooe/resumable/.gitignore b/vendor/github.com/stevvooe/resumable/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/stevvooe/resumable/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/stevvooe/resumable/README.md b/vendor/github.com/stevvooe/resumable/README.md deleted file mode 100644 index d2d3fb89..00000000 --- a/vendor/github.com/stevvooe/resumable/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# go-crypto -A Subset of the Go `crypto` Package with a Resumable Hash Interface - -### Documentation - -GoDocs: http://godoc.org/github.com/stevvooe/resumable diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE b/vendor/github.com/xenolf/lego/LICENSE similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/LICENSE rename to vendor/github.com/xenolf/lego/LICENSE diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go b/vendor/github.com/xenolf/lego/acme/challenges.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/challenges.go rename to vendor/github.com/xenolf/lego/acme/challenges.go diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go b/vendor/github.com/xenolf/lego/acme/client.go similarity index 91% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go rename to vendor/github.com/xenolf/lego/acme/client.go index 16e4cbe0..445dc2bd 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client.go +++ b/vendor/github.com/xenolf/lego/acme/client.go @@ -103,6 +103,8 @@ func (c *Client) SetChallengeProvider(challenge Challenge, p ChallengeProvider) c.solvers[challenge] = &httpChallenge{jws: c.jws, validate: validate, provider: p} case TLSSNI01: c.solvers[challenge] = &tlsSNIChallenge{jws: c.jws, validate: validate, provider: p} + case DNS01: + c.solvers[challenge] = &dnsChallenge{jws: c.jws, validate: validate, provider: p} default: return fmt.Errorf("Unknown challenge %v", challenge) } @@ -187,6 +189,68 @@ func (c *Client) Register() (*RegistrationResource, error) { return reg, nil } +// DeleteRegistration deletes the client's user registration from the ACME +// server. +func (c *Client) DeleteRegistration() error { + if c == nil || c.user == nil { + return errors.New("acme: cannot unregister a nil client or user") + } + logf("[INFO] acme: Deleting account for %s", c.user.GetEmail()) + + regMsg := registrationMessage{ + Resource: "reg", + Delete: true, + } + + _, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, nil) + if err != nil { + return err + } + + return nil +} + +// QueryRegistration runs a POST request on the client's registration and +// returns the result. +// +// This is similar to the Register function, but acting on an existing +// registration link and resource. 
+func (c *Client) QueryRegistration() (*RegistrationResource, error) { + if c == nil || c.user == nil { + return nil, errors.New("acme: cannot query the registration of a nil client or user") + } + // Log the URL here instead of the email as the email may not be set + logf("[INFO] acme: Querying account for %s", c.user.GetRegistration().URI) + + regMsg := registrationMessage{ + Resource: "reg", + } + + var serverReg Registration + hdr, err := postJSON(c.jws, c.user.GetRegistration().URI, regMsg, &serverReg) + if err != nil { + return nil, err + } + + reg := &RegistrationResource{Body: serverReg} + + links := parseLinks(hdr["Link"]) + // Location: header is not returned so this needs to be populated off of + // existing URI + reg.URI = c.user.GetRegistration().URI + if links["terms-of-service"] != "" { + reg.TosURL = links["terms-of-service"] + } + + if links["next"] != "" { + reg.NewAuthzURL = links["next"] + } else { + return nil, errors.New("acme: No new-authz link in response to registration query") + } + + return reg, nil +} + // AgreeToTOS updates the Client registration and sends the agreement to // the server. func (c *Client) AgreeToTOS() error { diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go b/vendor/github.com/xenolf/lego/acme/crypto.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto.go rename to vendor/github.com/xenolf/lego/acme/crypto.go diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go new file mode 100644 index 00000000..2f45e2a9 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge.go @@ -0,0 +1,279 @@ +package acme + +import ( + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "log" + "net" + "strings" + "time" + + "github.com/miekg/dns" + "golang.org/x/net/publicsuffix" +) + +type preCheckDNSFunc func(fqdn, value string) (bool, error) + +var ( + preCheckDNS preCheckDNSFunc = checkDNSPropagation + fqdnToZone = map[string]string{} +) + +var RecursiveNameservers = []string{ + "google-public-dns-a.google.com:53", + "google-public-dns-b.google.com:53", +} + +// DNSTimeout is used to override the default DNS timeout of 10 seconds. 
+var DNSTimeout = 10 * time.Second + +// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge +func DNS01Record(domain, keyAuth string) (fqdn string, value string, ttl int) { + keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) + // base64URL encoding without padding + keyAuthSha := base64.URLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) + value = strings.TrimRight(keyAuthSha, "=") + ttl = 120 + fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) + return +} + +// dnsChallenge implements the dns-01 challenge according to ACME 7.5 +type dnsChallenge struct { + jws *jws + validate validateFunc + provider ChallengeProvider +} + +func (s *dnsChallenge) Solve(chlng challenge, domain string) error { + logf("[INFO][%s] acme: Trying to solve DNS-01", domain) + + if s.provider == nil { + return errors.New("No DNS Provider configured") + } + + // Generate the Key Authorization for the challenge + keyAuth, err := getKeyAuthorization(chlng.Token, s.jws.privKey) + if err != nil { + return err + } + + err = s.provider.Present(domain, chlng.Token, keyAuth) + if err != nil { + return fmt.Errorf("Error presenting token: %s", err) + } + defer func() { + err := s.provider.CleanUp(domain, chlng.Token, keyAuth) + if err != nil { + log.Printf("Error cleaning up %s: %v ", domain, err) + } + }() + + fqdn, value, _ := DNS01Record(domain, keyAuth) + + logf("[INFO][%s] Checking DNS record propagation...", domain) + + var timeout, interval time.Duration + switch provider := s.provider.(type) { + case ChallengeProviderTimeout: + timeout, interval = provider.Timeout() + default: + timeout, interval = 60*time.Second, 2*time.Second + } + + err = WaitFor(timeout, interval, func() (bool, error) { + return preCheckDNS(fqdn, value) + }) + if err != nil { + return err + } + + return s.validate(s.jws, domain, chlng.URI, challenge{Resource: "challenge", Type: chlng.Type, Token: chlng.Token, KeyAuthorization: keyAuth}) +} + +// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. +func checkDNSPropagation(fqdn, value string) (bool, error) { + // Initial attempt to resolve at the recursive NS + r, err := dnsQuery(fqdn, dns.TypeTXT, RecursiveNameservers, true) + if err != nil { + return false, err + } + if r.Rcode == dns.RcodeSuccess { + // If we see a CNAME here then use the alias + for _, rr := range r.Answer { + if cn, ok := rr.(*dns.CNAME); ok { + if cn.Hdr.Name == fqdn { + fqdn = cn.Target + break + } + } + } + } + + authoritativeNss, err := lookupNameservers(fqdn) + if err != nil { + return false, err + } + + return checkAuthoritativeNss(fqdn, value, authoritativeNss) +} + +// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
+func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { + for _, ns := range nameservers { + r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) + if err != nil { + return false, err + } + + if r.Rcode != dns.RcodeSuccess { + return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) + } + + var found bool + for _, rr := range r.Answer { + if txt, ok := rr.(*dns.TXT); ok { + if strings.Join(txt.Txt, "") == value { + found = true + break + } + } + } + + if !found { + return false, fmt.Errorf("NS %s did not return the expected TXT record", ns) + } + } + + return true, nil +} + +// dnsQuery will query a nameserver, iterating through the supplied servers as it retries +// The nameserver should include a port, to facilitate testing where we talk to a mock dns server. +func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (in *dns.Msg, err error) { + m := new(dns.Msg) + m.SetQuestion(fqdn, rtype) + m.SetEdns0(4096, false) + + if !recursive { + m.RecursionDesired = false + } + + // Will retry the request based on the number of servers (n+1) + for i := 1; i <= len(nameservers)+1; i++ { + ns := nameservers[i%len(nameservers)] + udp := &dns.Client{Net: "udp", Timeout: DNSTimeout} + in, _, err = udp.Exchange(m, ns) + + if err == dns.ErrTruncated { + tcp := &dns.Client{Net: "tcp", Timeout: DNSTimeout} + // If the TCP request suceeds, the err will reset to nil + in, _, err = tcp.Exchange(m, ns) + } + + if err == nil { + break + } + } + return +} + +// lookupNameservers returns the authoritative nameservers for the given fqdn. +func lookupNameservers(fqdn string) ([]string, error) { + var authoritativeNss []string + + zone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return nil, err + } + + r, err := dnsQuery(zone, dns.TypeNS, RecursiveNameservers, true) + if err != nil { + return nil, err + } + + for _, rr := range r.Answer { + if ns, ok := rr.(*dns.NS); ok { + authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) + } + } + + if len(authoritativeNss) > 0 { + return authoritativeNss, nil + } + return nil, fmt.Errorf("Could not determine authoritative nameservers") +} + +// FindZoneByFqdn determines the zone of the given fqdn +func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) { + // Do we have it cached? + if zone, ok := fqdnToZone[fqdn]; ok { + return zone, nil + } + + // Query the authoritative nameserver for a hopefully non-existing SOA record, + // in the authority section of the reply it will have the SOA of the + // containing zone. 
rfc2308 has this to say on the subject: + // Name servers authoritative for a zone MUST include the SOA record of + // the zone in the authority section of the response when reporting an + // NXDOMAIN or indicating that no data (NODATA) of the requested type exists + in, err := dnsQuery(fqdn, dns.TypeSOA, nameservers, true) + if err != nil { + return "", err + } + if in.Rcode != dns.RcodeNameError { + if in.Rcode != dns.RcodeSuccess { + return "", fmt.Errorf("The NS returned %s for %s", dns.RcodeToString[in.Rcode], fqdn) + } + // We have a success, so one of the answers has to be a SOA RR + for _, ans := range in.Answer { + if soa, ok := ans.(*dns.SOA); ok { + return checkIfTLD(fqdn, soa) + } + } + // Or it is NODATA, fall through to NXDOMAIN + } + // Search the authority section for our precious SOA RR + for _, ns := range in.Ns { + if soa, ok := ns.(*dns.SOA); ok { + return checkIfTLD(fqdn, soa) + } + } + return "", fmt.Errorf("The NS did not return the expected SOA record in the authority section") +} + +func checkIfTLD(fqdn string, soa *dns.SOA) (string, error) { + zone := soa.Hdr.Name + // If we ended up on one of the TLDs, it means the domain did not exist. + publicsuffix, _ := publicsuffix.PublicSuffix(UnFqdn(zone)) + if publicsuffix == UnFqdn(zone) { + return "", fmt.Errorf("Could not determine zone authoritatively") + } + fqdnToZone[fqdn] = zone + return zone, nil +} + +// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. +func ClearFqdnCache() { + fqdnToZone = map[string]string{} +} + +// ToFqdn converts the name into a fqdn appending a trailing dot. +func ToFqdn(name string) string { + n := len(name) + if n == 0 || name[n-1] == '.' { + return name + } + return name + "." +} + +// UnFqdn converts the fqdn into a name removing the trailing dot. +func UnFqdn(name string) string { + n := len(name) + if n != 0 && name[n-1] == '.' { + return name[:n-1] + } + return name +} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go new file mode 100644 index 00000000..240384e6 --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge_manual.go @@ -0,0 +1,53 @@ +package acme + +import ( + "bufio" + "fmt" + "os" +) + +const ( + dnsTemplate = "%s %d IN TXT \"%s\"" +) + +// DNSProviderManual is an implementation of the ChallengeProvider interface +type DNSProviderManual struct{} + +// NewDNSProviderManual returns a DNSProviderManual instance. 
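A DNS provider typically combines the helpers above to work out which zone the challenge record belongs in. The following minimal sketch uses only exported names from this vendored package (`ToFqdn`, `FindZoneByFqdn`, `UnFqdn`, `RecursiveNameservers`); the domain is hypothetical and the lookup requires working DNS at runtime.

```go
package main

import (
	"fmt"
	"log"

	"github.com/xenolf/lego/acme"
)

func main() {
	// The challenge record lives under the _acme-challenge label of the domain.
	fqdn := acme.ToFqdn("_acme-challenge.www.example.com")

	// FindZoneByFqdn walks up via SOA responses (and caches the answer),
	// telling the provider which zone the TXT record must be created in.
	zone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("create TXT record %s in zone %s\n", fqdn, acme.UnFqdn(zone))
}
```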
+func NewDNSProviderManual() (*DNSProviderManual, error) { + return &DNSProviderManual{}, nil +} + +// Present prints instructions for manually creating the TXT record +func (*DNSProviderManual) Present(domain, token, keyAuth string) error { + fqdn, value, ttl := DNS01Record(domain, keyAuth) + dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, value) + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return err + } + + logf("[INFO] acme: Please create the following TXT record in your %s zone:", authZone) + logf("[INFO] acme: %s", dnsRecord) + logf("[INFO] acme: Press 'Enter' when you are done") + + reader := bufio.NewReader(os.Stdin) + _, _ = reader.ReadString('\n') + return nil +} + +// CleanUp prints instructions for manually removing the TXT record +func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { + fqdn, _, ttl := DNS01Record(domain, keyAuth) + dnsRecord := fmt.Sprintf(dnsTemplate, fqdn, ttl, "...") + + authZone, err := FindZoneByFqdn(fqdn, RecursiveNameservers) + if err != nil { + return err + } + + logf("[INFO] acme: You can now remove this TXT record from your %s zone:", authZone) + logf("[INFO] acme: %s", dnsRecord) + return nil +} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go b/vendor/github.com/xenolf/lego/acme/error.go similarity index 78% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go rename to vendor/github.com/xenolf/lego/acme/error.go index b32561a3..2aa690b3 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/error.go +++ b/vendor/github.com/xenolf/lego/acme/error.go @@ -3,6 +3,7 @@ package acme import ( "encoding/json" "fmt" + "io/ioutil" "net/http" "strings" ) @@ -52,10 +53,22 @@ func (c challengeError) Error() string { func handleHTTPError(resp *http.Response) error { var errorDetail RemoteError - decoder := json.NewDecoder(resp.Body) - err := decoder.Decode(&errorDetail) - if err != nil { - return err + + contenType := resp.Header.Get("Content-Type") + // try to decode the content as JSON + if contenType == "application/json" || contenType == "application/problem+json" { + decoder := json.NewDecoder(resp.Body) + err := decoder.Decode(&errorDetail) + if err != nil { + return err + } + } else { + detailBytes, err := ioutil.ReadAll(limitReader(resp.Body, 1024*1024)) + if err != nil { + return err + } + + errorDetail.Detail = string(detailBytes) } errorDetail.StatusCode = resp.StatusCode diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go b/vendor/github.com/xenolf/lego/acme/http.go similarity index 95% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go rename to vendor/github.com/xenolf/lego/acme/http.go index 410aead6..3b5a37cb 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http.go +++ b/vendor/github.com/xenolf/lego/acme/http.go @@ -14,8 +14,11 @@ import ( // UserAgent (if non-empty) will be tacked onto the User-Agent string in requests. var UserAgent string +// HTTPTimeout is used to override the default HTTP timeout of 10 seconds. +var HTTPTimeout = 10 * time.Second + // defaultClient is an HTTP client with a reasonable timeout value. -var defaultClient = http.Client{Timeout: 10 * time.Second} +var defaultClient = http.Client{Timeout: HTTPTimeout} const ( // defaultGoUserAgent is the Go HTTP package user agent string. 
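`DNSProviderManual` above is the simplest possible `ChallengeProvider`: it just prints instructions. A provider backed by a real DNS API has the same method set, plus the optional `Timeout` method that `Solve` consults before waiting for propagation. The sketch below shows only that shape; `txtAPI` and its methods are invented stand-ins for a vendor client, not a real library.

```go
package exampledns

import (
	"time"

	"github.com/xenolf/lego/acme"
)

// txtAPI is a hypothetical DNS vendor client used only for this sketch.
type txtAPI interface {
	CreateTXT(fqdn, value string, ttl int) error
	DeleteTXT(fqdn string) error
}

// Provider satisfies acme.ChallengeProvider (Present/CleanUp) and also
// exposes Timeout, which the dns-01 solver checks before polling.
type Provider struct {
	api txtAPI
}

func (p *Provider) Present(domain, token, keyAuth string) error {
	fqdn, value, ttl := acme.DNS01Record(domain, keyAuth)
	return p.api.CreateTXT(fqdn, value, ttl)
}

func (p *Provider) CleanUp(domain, token, keyAuth string) error {
	fqdn, _, _ := acme.DNS01Record(domain, keyAuth)
	return p.api.DeleteTXT(fqdn)
}

func (p *Provider) Timeout() (timeout, interval time.Duration) {
	// Wait longer than the 60s/2s default used when a provider has no Timeout method.
	return 5 * time.Minute, 10 * time.Second
}
```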
Too diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go b/vendor/github.com/xenolf/lego/acme/http_challenge.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge.go rename to vendor/github.com/xenolf/lego/acme/http_challenge.go diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go b/vendor/github.com/xenolf/lego/acme/http_challenge_server.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_server.go rename to vendor/github.com/xenolf/lego/acme/http_challenge_server.go diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go b/vendor/github.com/xenolf/lego/acme/jws.go similarity index 96% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go rename to vendor/github.com/xenolf/lego/acme/jws.go index 8435d0cf..e000bc64 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/jws.go +++ b/vendor/github.com/xenolf/lego/acme/jws.go @@ -101,7 +101,9 @@ func (j *jws) Nonce() (string, error) { return nonce, err } } - + if len(j.nonces) == 0 { + return "", fmt.Errorf("Can't get nonce") + } nonce, j.nonces = j.nonces[len(j.nonces)-1], j.nonces[:len(j.nonces)-1] return nonce, nil } diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go b/vendor/github.com/xenolf/lego/acme/messages.go similarity index 98% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go rename to vendor/github.com/xenolf/lego/acme/messages.go index d1fac920..a6539b96 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/messages.go +++ b/vendor/github.com/xenolf/lego/acme/messages.go @@ -22,6 +22,7 @@ type recoveryKeyMessage struct { type registrationMessage struct { Resource string `json:"resource"` Contact []string `json:"contact"` + Delete bool `json:"delete,omitempty"` // RecoveryKey recoveryKeyMessage `json:"recoveryKey,omitempty"` } diff --git a/vendor/github.com/xenolf/lego/acme/pop_challenge.go b/vendor/github.com/xenolf/lego/acme/pop_challenge.go new file mode 100644 index 00000000..8d2a213b --- /dev/null +++ b/vendor/github.com/xenolf/lego/acme/pop_challenge.go @@ -0,0 +1 @@ +package acme diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go b/vendor/github.com/xenolf/lego/acme/provider.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/provider.go rename to vendor/github.com/xenolf/lego/acme/provider.go diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go similarity index 85% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go rename to vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go index f184b17a..34383cbf 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go +++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge.go @@ -41,7 +41,7 @@ func (t *tlsSNIChallenge) Solve(chlng challenge, domain string) error { } // TLSSNI01ChallengeCert returns a certificate and target domain for the `tls-sni-01` challenge -func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error) { +func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, string, error) { // generate a new RSA key for the 
certificates tempPrivKey, err := generatePrivateKey(RSA2048) if err != nil { @@ -65,9 +65,3 @@ func TLSSNI01ChallengeCertDomain(keyAuth string) (tls.Certificate, string, error return certificate, domain, nil } - -// TLSSNI01ChallengeCert returns a certificate for the `tls-sni-01` challenge -func TLSSNI01ChallengeCert(keyAuth string) (tls.Certificate, error) { - cert, _, err := TLSSNI01ChallengeCertDomain(keyAuth) - return cert, err -} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go similarity index 96% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go rename to vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go index faaf16f6..df00fbb5 100644 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go +++ b/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_server.go @@ -30,7 +30,7 @@ func (s *TLSProviderServer) Present(domain, token, keyAuth string) error { s.port = "443" } - cert, err := TLSSNI01ChallengeCert(keyAuth) + cert, _, err := TLSSNI01ChallengeCert(keyAuth) if err != nil { return err } diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go b/vendor/github.com/xenolf/lego/acme/utils.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils.go rename to vendor/github.com/xenolf/lego/acme/utils.go diff --git a/vendor/github.com/yvasiyarov/go-metrics/.gitignore b/vendor/github.com/yvasiyarov/go-metrics/.gitignore deleted file mode 100644 index 83c8f823..00000000 --- a/vendor/github.com/yvasiyarov/go-metrics/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.[68] -*.a -*.out -*.swp -_obj -_testmain.go -cmd/metrics-bench/metrics-bench -cmd/metrics-example/metrics-example -cmd/never-read/never-read diff --git a/vendor/github.com/yvasiyarov/go-metrics/README.md b/vendor/github.com/yvasiyarov/go-metrics/README.md deleted file mode 100644 index e0091a4b..00000000 --- a/vendor/github.com/yvasiyarov/go-metrics/README.md +++ /dev/null @@ -1,104 +0,0 @@ -go-metrics -========== - -Go port of Coda Hale's Metrics library: . - -Documentation: . 
- -Usage ------ - -Create and update metrics: - -```go -c := metrics.NewCounter() -metrics.Register("foo", c) -c.Inc(47) - -g := metrics.NewGauge() -metrics.Register("bar", g) -g.Update(47) - -s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028) -h := metrics.NewHistogram(s) -metrics.Register("baz", h) -h.Update(47) - -m := metrics.NewMeter() -metrics.Register("quux", m) -m.Mark(47) - -t := metrics.NewTimer() -metrics.Register("bang", t) -t.Time(func() {}) -t.Update(47) -``` - -Periodically log every metric in human-readable form to standard error: - -```go -go metrics.Log(metrics.DefaultRegistry, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds)) -``` - -Periodically log every metric in slightly-more-parseable form to syslog: - -```go -w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics") -go metrics.Syslog(metrics.DefaultRegistry, 60e9, w) -``` - -Periodically emit every metric to Graphite: - -```go -addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003") -go metrics.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) -``` - -Periodically emit every metric into InfluxDB: - -```go -import "github.com/rcrowley/go-metrics/influxdb" - -go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{ - Host: "127.0.0.1:8086", - Database: "metrics", - Username: "test", - Password: "test", -}) -``` - -Periodically upload every metric to Librato: - -```go -import "github.com/rcrowley/go-metrics/librato" - -go librato.Librato(metrics.DefaultRegistry, - 10e9, // interval - "example@example.com", // account owner email address - "token", // Librato API token - "hostname", // source - []float64{0.95}, // precentiles to send - time.Millisecond, // time unit -) -``` - -Periodically emit every metric to StatHat: - -```go -import "github.com/rcrowley/go-metrics/stathat" - -go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") -``` - -Installation ------------- - -```sh -go get github.com/rcrowley/go-metrics -``` - -StatHat support additionally requires their Go client: - -```sh -go get github.com/stathat/go -``` diff --git a/vendor/github.com/yvasiyarov/go-metrics/memory.md b/vendor/github.com/yvasiyarov/go-metrics/memory.md deleted file mode 100644 index 47454f54..00000000 --- a/vendor/github.com/yvasiyarov/go-metrics/memory.md +++ /dev/null @@ -1,285 +0,0 @@ -Memory usage -============ - -(Highly unscientific.) 
- -Command used to gather static memory usage: - -```sh -grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status" -``` - -Program used to gather baseline memory usage: - -```go -package main - -import "time" - -func main() { - time.Sleep(600e9) -} -``` - -Baseline --------- - -``` -VmPeak: 42604 kB -VmSize: 42604 kB -VmLck: 0 kB -VmHWM: 1120 kB -VmRSS: 1120 kB -VmData: 35460 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 36 kB -VmSwap: 0 kB -``` - -Program used to gather metric memory usage (with other metrics being similar): - -```go -package main - -import ( - "fmt" - "metrics" - "time" -) - -func main() { - fmt.Sprintf("foo") - metrics.NewRegistry() - time.Sleep(600e9) -} -``` - -1000 counters registered ------------------------- - -``` -VmPeak: 44016 kB -VmSize: 44016 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.412 kB virtual, TODO 0.808 kB resident per counter.** - -100000 counters registered --------------------------- - -``` -VmPeak: 55024 kB -VmSize: 55024 kB -VmLck: 0 kB -VmHWM: 12440 kB -VmRSS: 12440 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1024 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**0.1242 kB virtual, 0.1132 kB resident per counter.** - -1000 gauges registered ----------------------- - -``` -VmPeak: 44012 kB -VmSize: 44012 kB -VmLck: 0 kB -VmHWM: 1928 kB -VmRSS: 1928 kB -VmData: 36868 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 40 kB -VmSwap: 0 kB -``` - -**1.408 kB virtual, 0.808 kB resident per counter.** - -100000 gauges registered ------------------------- - -``` -VmPeak: 55020 kB -VmSize: 55020 kB -VmLck: 0 kB -VmHWM: 12432 kB -VmRSS: 12432 kB -VmData: 47876 kB -VmStk: 136 kB -VmExe: 1020 kB -VmLib: 1848 kB -VmPTE: 60 kB -VmSwap: 0 kB -``` - -**0.12416 kB virtual, 0.11312 resident per gauge.** - -1000 histograms with a uniform sample size of 1028 --------------------------------------------------- - -``` -VmPeak: 72272 kB -VmSize: 72272 kB -VmLck: 0 kB -VmHWM: 16204 kB -VmRSS: 16204 kB -VmData: 65100 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 80 kB -VmSwap: 0 kB -``` - -**29.668 kB virtual, TODO 15.084 resident per histogram.** - -10000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 256912 kB -VmSize: 256912 kB -VmLck: 0 kB -VmHWM: 146204 kB -VmRSS: 146204 kB -VmData: 249740 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 448 kB -VmSwap: 0 kB -``` - -**21.4308 kB virtual, 14.5084 kB resident per histogram.** - -50000 histograms with a uniform sample size of 1028 ---------------------------------------------------- - -``` -VmPeak: 908112 kB -VmSize: 908112 kB -VmLck: 0 kB -VmHWM: 645832 kB -VmRSS: 645588 kB -VmData: 900940 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1716 kB -VmSwap: 1544 kB -``` - -**17.31016 kB virtual, 12.88936 kB resident per histogram.** - -1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 -------------------------------------------------------------------------------------- - -``` -VmPeak: 62480 kB -VmSize: 62480 kB -VmLck: 0 kB -VmHWM: 11572 kB -VmRSS: 11572 kB -VmData: 55308 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 64 kB -VmSwap: 0 kB -``` - -**19.876 kB virtual, 10.452 kB resident per histogram.** - -10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 
--------------------------------------------------------------------------------------- - -``` -VmPeak: 153296 kB -VmSize: 153296 kB -VmLck: 0 kB -VmHWM: 101176 kB -VmRSS: 101176 kB -VmData: 146124 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 240 kB -VmSwap: 0 kB -``` - -**11.0692 kB virtual, 10.0056 kB resident per histogram.** - -50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015 --------------------------------------------------------------------------------------- - -``` -VmPeak: 557264 kB -VmSize: 557264 kB -VmLck: 0 kB -VmHWM: 501056 kB -VmRSS: 501056 kB -VmData: 550092 kB -VmStk: 136 kB -VmExe: 1048 kB -VmLib: 1848 kB -VmPTE: 1032 kB -VmSwap: 0 kB -``` - -**10.2932 kB virtual, 9.99872 kB resident per histogram.** - -1000 meters ------------ - -``` -VmPeak: 74504 kB -VmSize: 74504 kB -VmLck: 0 kB -VmHWM: 24124 kB -VmRSS: 24124 kB -VmData: 67340 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 92 kB -VmSwap: 0 kB -``` - -**31.9 kB virtual, 23.004 kB resident per meter.** - -10000 meters ------------- - -``` -VmPeak: 278920 kB -VmSize: 278920 kB -VmLck: 0 kB -VmHWM: 227300 kB -VmRSS: 227300 kB -VmData: 271756 kB -VmStk: 136 kB -VmExe: 1040 kB -VmLib: 1848 kB -VmPTE: 488 kB -VmSwap: 0 kB -``` - -**23.6316 kB virtual, 22.618 kB resident per meter.** diff --git a/vendor/github.com/yvasiyarov/gorelic/.gitignore b/vendor/github.com/yvasiyarov/gorelic/.gitignore deleted file mode 100644 index ca502e29..00000000 --- a/vendor/github.com/yvasiyarov/gorelic/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.nut -*.swp -examples/example1 -examples/example_web diff --git a/vendor/github.com/yvasiyarov/gorelic/.travis.yml b/vendor/github.com/yvasiyarov/gorelic/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/yvasiyarov/gorelic/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/yvasiyarov/gorelic/README.md b/vendor/github.com/yvasiyarov/gorelic/README.md deleted file mode 100644 index 61068a82..00000000 --- a/vendor/github.com/yvasiyarov/gorelic/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# GoRelic - -New Relic agent for Go runtime. It collect a lot of metrics about scheduler, garbage collector and memory allocator and -send them to NewRelic. - -### Requirements -- Go 1.1 or higher -- github.com/yvasiyarov/gorelic -- github.com/yvasiyarov/newrelic_platform_go -- github.com/yvasiyarov/go-metrics - -You have to install manually only first two dependencies. All other dependencies will be installed automatically -by Go toolchain. - -### Installation -```bash -go get github.com/yvasiyarov/gorelic -``` -and add to the initialization part of your application following code: -```go -import ( - "github.com/yvasiyarov/gorelic" -) -.... - -agent := gorelic.NewAgent() -agent.Verbose = true -agent.NewrelicLicense = "YOUR NEWRELIC LICENSE KEY THERE" -agent.Run() - -``` - -### Middleware -If you using Beego, Martini, Revel or Gin framework you can hook up gorelic with your application by using the following middleware: -- https://github.com/yvasiyarov/beego_gorelic -- https://github.com/yvasiyarov/martini_gorelic -- https://github.com/yvasiyarov/gocraft_gorelic -- http://wiki.colar.net/revel_newelic -- https://github.com/jingweno/negroni-gorelic -- https://github.com/brandfolder/gin-gorelic - - -### Configuration -- NewrelicLicense - its the only mandatory setting of this agent. -- NewrelicName - component name in NewRelic dashboard. 
Default value: "Go daemon" -- NewrelicPollInterval - how often metrics will be sent to NewRelic. Default value: 60 seconds -- Verbose - print some usefull for debugging information. Default value: false -- CollectGcStat - should agent collect garbage collector statistic or not. Default value: true -- CollectHTTPStat - should agent collect HTTP metrics. Default value: false -- CollectMemoryStat - should agent collect memory allocator statistic or not. Default value: true -- GCPollInterval - how often should GC statistic collected. Default value: 10 seconds. It has performance impact. For more information, please, see metrics documentation. -- MemoryAllocatorPollInterval - how often should memory allocator statistic collected. Default value: 60 seconds. It has performance impact. For more information, please, read metrics documentation. - - -## Metrics reported by plugin -This agent use functions exposed by runtime or runtime/debug packages to collect most important information about Go runtime. - -### General metrics -- Runtime/General/NOGoroutines - number of runned go routines, as it reported by NumGoroutine() from runtime package -- Runtime/General/NOCgoCalls - number of runned cgo calls, as it reported by NumCgoCall() from runtime package - -### Garbage collector metrics -- Runtime/GC/NumberOfGCCalls - Nuber of GC calls, as it reported by ReadGCStats() from runtime/debug -- Runtime/GC/PauseTotalTime - Total pause time diring GC calls, as it reported by ReadGCStats() from runtime/debug (in nanoseconds) -- Runtime/GC/GCTime/Max - max GC time -- Runtime/GC/GCTime/Min - min GC time -- Runtime/GC/GCTime/Mean - GC mean time -- Runtime/GC/GCTime/Percentile95 - 95% percentile of GC time - -All this metrics are measured in nanoseconds. Last 4 of them can be inaccurate if GC called more often then once in GCPollInterval. -If in your workload GC is called more often - you can consider decreasing value of GCPollInterval. -But be carefull, ReadGCStats() blocks mheap, so its not good idea to set GCPollInterval to very low values. - -### Memory allocator -- Component/Runtime/Memory/SysMem/Total - number of bytes/minute allocated from OS totally. -- Component/Runtime/Memory/SysMem/Stack - number of bytes/minute allocated from OS for stacks. -- Component/Runtime/Memory/SysMem/MSpan - number of bytes/minute allocated from OS for internal MSpan structs. -- Component/Runtime/Memory/SysMem/MCache - number of bytes/minute allocated from OS for internal MCache structs. -- Component/Runtime/Memory/SysMem/Heap - number of bytes/minute allocated from OS for heap. -- Component/Runtime/Memory/SysMem/BuckHash - number of bytes/minute allocated from OS for internal BuckHash structs. 
-- Component/Runtime/Memory/Operations/NoFrees - number of memory frees per minute -- Component/Runtime/Memory/Operations/NoMallocs - number of memory allocations per minute -- Component/Runtime/Memory/Operations/NoPointerLookups - number of pointer lookups per minute -- Component/Runtime/Memory/InUse/Total - total amount of memory in use -- Component/Runtime/Memory/InUse/Heap - amount of memory in use for heap -- Component/Runtime/Memory/InUse/MCacheInuse - amount of memory in use for MCache internal structures -- Component/Runtime/Memory/InUse/MSpanInuse - amount of memory in use for MSpan internal structures -- Component/Runtime/Memory/InUse/Stack - amount of memory in use for stacks - -### Process metrics -- Component/Runtime/System/Threads - number of OS threads used -- Runtime/System/FDSize - number of file descriptors, used by process -- Runtime/System/Memory/VmPeakSize - VM max size -- Runtime/System/Memory/VmCurrent - VM current size -- Runtime/System/Memory/RssPeak - max size of resident memory set -- Runtime/System/Memory/RssCurrent - current size of resident memory set - -All this metrics collected once in MemoryAllocatorPollInterval. In order to collect this statistic agent use ReadMemStats() routine. -This routine calls stoptheworld() internally and it block everything. So, please, consider this when you change MemoryAllocatorPollInterval value. - -### HTTP metrics -- throughput (requests per second), calculated for last minute -- mean throughput (requests per second) -- mean response time -- min response time -- max response time -- 75%, 90%, 95% percentiles for response time - - -In order to collect HTTP metrics, handler functions must be wrapped using WrapHTTPHandlerFunc: - -```go -http.HandleFunc("/", agent.WrapHTTPHandlerFunc(handler)) -``` - -## TODO -- Collect per-size allocation statistic -- Collect user defined metrics - diff --git a/vendor/github.com/yvasiyarov/gorelic/nut.json b/vendor/github.com/yvasiyarov/gorelic/nut.json deleted file mode 100644 index 7abb8ec6..00000000 --- a/vendor/github.com/yvasiyarov/gorelic/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.6", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - "README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/gorelic" -} diff --git a/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml b/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml deleted file mode 100644 index 4f2ee4d9..00000000 --- a/vendor/github.com/yvasiyarov/newrelic_platform_go/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md b/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md deleted file mode 100644 index 34462344..00000000 --- a/vendor/github.com/yvasiyarov/newrelic_platform_go/README.md +++ /dev/null @@ -1,11 +0,0 @@ -New Relic Platform Agent SDK for Go(golang) -==================== - -[![Build Status](https://travis-ci.org/yvasiyarov/newrelic_platform_go.png?branch=master)](https://travis-ci.org/yvasiyarov/newrelic_platform_go) - -This package provide very simple interface to NewRelic Platform http://newrelic.com/platform - -For example of usage see examples/wave_plugin.go - -For real-word example, you can have a look at: -https://github.com/yvasiyarov/newrelic_sphinx diff --git a/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json b/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json deleted file 
mode 100644 index 1e57c395..00000000 --- a/vendor/github.com/yvasiyarov/newrelic_platform_go/nut.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Version": "0.0.1", - "Vendor": "yvasiyarov", - "Authors": [ - { - "FullName": "Yuriy Vasiyarov", - "Email": "varyous@gmail.com" - } - ], - "ExtraFiles": [ - "README.md", - "LICENSE" - ], - "Homepage": "https://github.com/yvasiyarov/newrelic_platform_go" -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go index b8e18d74..f8b807f9 100644 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -4,7 +4,7 @@ // Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing // algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt +package bcrypt // import "golang.org/x/crypto/bcrypt" // The code is a port of Provos and Mazières's C implementation. import ( diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go index 5019658a..542984aa 100644 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -package blowfish +package blowfish // import "golang.org/x/crypto/blowfish" // The code is a port of Bruce Schneier's C implementation. // See http://www.schneier.com/blowfish.html. diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index f6a1bd4d..602fefa6 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -5,7 +5,7 @@ // Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses // are signed messages attesting to the validity of a certificate for a small // period of time. This is used to manage revocation for X.509 certificates. -package ocsp +package ocsp // import "golang.org/x/crypto/ocsp" import ( "crypto" diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index 46629881..19235cf2 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. -package context +package context // import "golang.org/x/net/context" import ( "errors" diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go index e35860a7..a7ed8d81 100644 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package ctxhttp provides helper functions for performing context-aware HTTP requests. 
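The `// import "..."` comments being added to these x/crypto and x/net files are canonical import path declarations: with one in place, the go tool rejects builds that import the package through any other path (for example through a fork), while vendored copies such as these are generally exempt from the check. A one-file illustration with a hypothetical package and path:

```go
// Package widget exists only to show the syntax. With the import comment
// below, `go build` refuses any import of this package that does not use
// exactly "example.com/widget".
package widget // import "example.com/widget"
```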
-package ctxhttp +package ctxhttp // import "golang.org/x/net/context/ctxhttp" import ( "io" diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore deleted file mode 100644 index 190f1223..00000000 --- a/vendor/golang.org/x/net/http2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*~ -h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc5257..00000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. -# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f..00000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa3..00000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 00000000..3daa8979 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna // import "golang.org/x/net/idna" + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". +func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000..92e733f6 --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
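A short usage sketch for the two exported entry points of this new idna package; the expected outputs in the comments are taken from the functions' own doc comments above.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com

	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // bücher.example.com
}
```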
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go index 3f90b730..1119f344 100644 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package timeseries implements a time series structure for stats collection. -package timeseries +package timeseries // import "golang.org/x/net/internal/timeseries" import ( "fmt" diff --git a/vendor/golang.org/x/net/publicsuffix/gen.go b/vendor/golang.org/x/net/publicsuffix/gen.go new file mode 100644 index 00000000..5c8d7b5f --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/gen.go @@ -0,0 +1,663 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This program generates table.go and table_test.go. +// Invoke as: +// +// go run gen.go -version "xxx" >table.go +// go run gen.go -version "xxx" -test >table_test.go +// +// Pass -v to print verbose progress information. +// +// The version is derived from information found at +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat +// +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/format" + "io" + "net/http" + "os" + "regexp" + "sort" + "strings" + + "golang.org/x/net/idna" +) + +const ( + // These sum of these four values must be no greater than 32. + nodesBitsChildren = 9 + nodesBitsICANN = 1 + nodesBitsTextOffset = 15 + nodesBitsTextLength = 6 + + // These sum of these four values must be no greater than 32. + childrenBitsWildcard = 1 + childrenBitsNodeType = 2 + childrenBitsHi = 14 + childrenBitsLo = 14 +) + +var ( + maxChildren int + maxTextOffset int + maxTextLength int + maxHi uint32 + maxLo uint32 +) + +func max(a, b int) int { + if a < b { + return b + } + return a +} + +func u32max(a, b uint32) uint32 { + if a < b { + return b + } + return a +} + +const ( + nodeTypeNormal = 0 + nodeTypeException = 1 + nodeTypeParentOnly = 2 + numNodeType = 3 +) + +func nodeTypeStr(n int) string { + switch n { + case nodeTypeNormal: + return "+" + case nodeTypeException: + return "!" + case nodeTypeParentOnly: + return "o" + } + panic("unreachable") +} + +var ( + labelEncoding = map[string]uint32{} + labelsList = []string{} + labelsMap = map[string]bool{} + rules = []string{} + + // validSuffix is used to check that the entries in the public suffix list + // are in canonical form (after Punycode encoding). Specifically, capital + // letters are not allowed. + validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`) + + subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging") + url = flag.String("url", + "https://publicsuffix.org/list/effective_tld_names.dat", + "URL of the publicsuffix.org list. 
If empty, stdin is read instead") + v = flag.Bool("v", false, "verbose output (to stderr)") + version = flag.String("version", "", "the effective_tld_names.dat version") + test = flag.Bool("test", false, "generate table_test.go") +) + +func main() { + if err := main1(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func main1() error { + flag.Parse() + if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 { + return fmt.Errorf("not enough bits to encode the nodes table") + } + if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 { + return fmt.Errorf("not enough bits to encode the children table") + } + if *version == "" { + return fmt.Errorf("-version was not specified") + } + var r io.Reader = os.Stdin + if *url != "" { + res, err := http.Get(*url) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + return fmt.Errorf("bad GET status for %s: %d", *url, res.Status) + } + r = res.Body + defer res.Body.Close() + } + + var root node + icann := false + buf := new(bytes.Buffer) + br := bufio.NewReader(r) + for { + s, err := br.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + s = strings.TrimSpace(s) + if strings.Contains(s, "BEGIN ICANN DOMAINS") { + icann = true + continue + } + if strings.Contains(s, "END ICANN DOMAINS") { + icann = false + continue + } + if s == "" || strings.HasPrefix(s, "//") { + continue + } + s, err = idna.ToASCII(s) + if err != nil { + return err + } + if !validSuffix.MatchString(s) { + return fmt.Errorf("bad publicsuffix.org list data: %q", s) + } + + if *subset { + switch { + case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"): + case s == "ak.us" || strings.HasSuffix(s, ".ak.us"): + case s == "ao" || strings.HasSuffix(s, ".ao"): + case s == "ar" || strings.HasSuffix(s, ".ar"): + case s == "arpa" || strings.HasSuffix(s, ".arpa"): + case s == "cy" || strings.HasSuffix(s, ".cy"): + case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"): + case s == "jp": + case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"): + case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"): + case s == "om" || strings.HasSuffix(s, ".om"): + case s == "uk" || strings.HasSuffix(s, ".uk"): + case s == "uk.com" || strings.HasSuffix(s, ".uk.com"): + case s == "tw" || strings.HasSuffix(s, ".tw"): + case s == "zw" || strings.HasSuffix(s, ".zw"): + case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"): + // xn--p1ai is Russian-Cyrillic "рф". 
+ default: + continue + } + } + + rules = append(rules, s) + + nt, wildcard := nodeTypeNormal, false + switch { + case strings.HasPrefix(s, "*."): + s, nt = s[2:], nodeTypeParentOnly + wildcard = true + case strings.HasPrefix(s, "!"): + s, nt = s[1:], nodeTypeException + } + labels := strings.Split(s, ".") + for n, i := &root, len(labels)-1; i >= 0; i-- { + label := labels[i] + n = n.child(label) + if i == 0 { + if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly { + n.nodeType = nt + } + n.icann = n.icann && icann + n.wildcard = n.wildcard || wildcard + } + labelsMap[label] = true + } + } + labelsList = make([]string, 0, len(labelsMap)) + for label := range labelsMap { + labelsList = append(labelsList, label) + } + sort.Strings(labelsList) + + p := printReal + if *test { + p = printTest + } + if err := p(buf, &root); err != nil { + return err + } + + b, err := format.Source(buf.Bytes()) + if err != nil { + return err + } + _, err = os.Stdout.Write(b) + return err +} + +func printTest(w io.Writer, n *node) error { + fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n") + fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n") + for _, rule := range rules { + fmt.Fprintf(w, "%q,\n", rule) + } + fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n") + if err := n.walk(w, printNodeLabel); err != nil { + return err + } + fmt.Fprintf(w, "}\n") + return nil +} + +func printReal(w io.Writer, n *node) error { + const header = `// generated by go run gen.go; DO NOT EDIT + +package publicsuffix + +const version = %q + +const ( + nodesBitsChildren = %d + nodesBitsICANN = %d + nodesBitsTextOffset = %d + nodesBitsTextLength = %d + + childrenBitsWildcard = %d + childrenBitsNodeType = %d + childrenBitsHi = %d + childrenBitsLo = %d +) + +const ( + nodeTypeNormal = %d + nodeTypeException = %d + nodeTypeParentOnly = %d +) + +// numTLD is the number of top level domains. +const numTLD = %d + +` + fmt.Fprintf(w, header, *version, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo, + nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children)) + + text := combineText(labelsList) + if text == "" { + return fmt.Errorf("internal error: makeText returned no text") + } + for _, label := range labelsList { + offset, length := strings.Index(text, label), len(label) + if offset < 0 { + return fmt.Errorf("internal error: could not find %q in text %q", label, text) + } + maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length) + if offset >= 1<= 1< 64 { + n, plus = 64, " +" + } + fmt.Fprintf(w, "%q%s\n", text[:n], plus) + text = text[n:] + } + + if err := n.walk(w, assignIndexes); err != nil { + return err + } + + fmt.Fprintf(w, ` + +// nodes is the list of nodes. Each node is represented as a uint32, which +// encodes the node's children, wildcard bit and node type (as an index into +// the children array), ICANN bit and text. +// +// In the //-comment after each node's data, the nodes indexes of the children +// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The +// nodeType is printed as + for normal, ! for exception, and o for parent-only +// nodes that have children but don't match a domain label in their own right. +// An I denotes an ICANN domain. 
+// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] children index +// [%2d bits] ICANN bit +// [%2d bits] text index +// [%2d bits] text length +var nodes = [...]uint32{ +`, + 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength, + nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength) + if err := n.walk(w, printNode); err != nil { + return err + } + fmt.Fprintf(w, `} + +// children is the list of nodes' children, the parent's wildcard bit and the +// parent's node type. If a node has no children then their children index +// will be in the range [0, 6), depending on the wildcard bit and node type. +// +// The layout within the uint32, from MSB to LSB, is: +// [%2d bits] unused +// [%2d bits] wildcard bit +// [%2d bits] node type +// [%2d bits] high nodes index (exclusive) of children +// [%2d bits] low nodes index (inclusive) of children +var children=[...]uint32{ +`, + 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo, + childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo) + for i, c := range childrenEncoding { + s := "---------------" + lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0 + fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n", + c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType)) + } + fmt.Fprintf(w, "}\n\n") + fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" { + ss = ss[1:] + } + return ss +} + +// crush combines a list of strings, taking advantage of overlaps. It returns a +// single string that contains each input string as a substring. +func crush(ss []string) string { + maxLabelLen := 0 + for _, s := range ss { + if maxLabelLen < len(s) { + maxLabelLen = len(s) + } + } + + for prefixLen := maxLabelLen; prefixLen > 0; prefixLen-- { + prefixes := makePrefixMap(ss, prefixLen) + for i, s := range ss { + if len(s) <= prefixLen { + continue + } + mergeLabel(ss, i, prefixLen, prefixes) + } + } + + return strings.Join(ss, "") +} + +// mergeLabel merges the label at ss[i] with the first available matching label +// in prefixMap, where the last "prefixLen" characters in ss[i] match the first +// "prefixLen" characters in the matching label. +// It will merge ss[i] repeatedly until no more matches are available. +// All matching labels merged into ss[i] are replaced by "". +func mergeLabel(ss []string, i, prefixLen int, prefixes prefixMap) { + s := ss[i] + suffix := s[len(s)-prefixLen:] + for _, j := range prefixes[suffix] { + // Empty strings mean "already used." Also avoid merging with self. + if ss[j] == "" || i == j { + continue + } + if *v { + fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d): %q and %q share %q\n", + prefixLen, i, j, ss[i], ss[j], suffix) + } + ss[i] += ss[j][prefixLen:] + ss[j] = "" + // ss[i] has a new suffix, so merge again if possible. + // Note: we only have to merge again at the same prefix length. Shorter + // prefix lengths will be handled in the next iteration of crush's for loop. + // Can there be matches for longer prefix lengths, introduced by the merge? + // I believe that any such matches would by necessity have been eliminated + // during substring removal or merged at a higher prefix length. For + // instance, in crush("abc", "cde", "bcdef"), combining "abc" and "cde" + // would yield "abcde", which could be merged with "bcdef." 
However, in + // practice "cde" would already have been elimintated by removeSubstrings. + mergeLabel(ss, i, prefixLen, prefixes) + return + } +} + +// prefixMap maps from a prefix to a list of strings containing that prefix. The +// list of strings is represented as indexes into a slice of strings stored +// elsewhere. +type prefixMap map[string][]int + +// makePrefixMap constructs a prefixMap from a slice of strings. +func makePrefixMap(ss []string, prefixLen int) prefixMap { + prefixes := make(prefixMap) + for i, s := range ss { + // We use < rather than <= because if a label matches on a prefix equal to + // its full length, that's actually a substring match handled by + // removeSubstrings. + if prefixLen < len(s) { + prefix := s[:prefixLen] + prefixes[prefix] = append(prefixes[prefix], i) + } + } + + return prefixes +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 00000000..9419ca99 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,133 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package publicsuffix provides a public suffix list based on data from +// http://publicsuffix.org/. A public suffix is one under which Internet users +// can directly register names. +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is privately +// managed. For example, foo.org and foo.co.uk are ICANN domains, +// foo.dyndns.org and foo.blogspot.co.uk are private domains. +// +// Use cases for distinguishing ICANN domains like foo.com from private +// domains like foo.appspot.com can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, wildcard := domain, len(domain), false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) + icann = u&(1<>= nodesBitsICANN + u = children[u&(1<>= childrenBitsLo + hi = u & (1<>= childrenBitsHi + switch u & (1<>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... 
" -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 00000000..af463fbb --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto +// DO NOT EDIT! + +/* +Package urlfetch is a generated protocol buffer package. + +It is generated from these files: + google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +It has these top-level messages: + URLFetchServiceError + URLFetchRequest + URLFetchResponse +*/ +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := 
new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} + +type URLFetchServiceError struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} + +const Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil 
+} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && 
m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto new file mode 100644 index 00000000..f695edf6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "urlfetch"; + +package appengine; + +message URLFetchServiceError { + enum ErrorCode { + OK = 0; + INVALID_URL = 1; + FETCH_ERROR = 2; + UNSPECIFIED_ERROR = 3; + RESPONSE_TOO_LARGE = 4; + DEADLINE_EXCEEDED = 5; + SSL_CERTIFICATE_ERROR = 6; + DNS_ERROR = 7; + CLOSED = 8; + INTERNAL_TRANSIENT_ERROR = 9; + TOO_MANY_REDIRECTS = 10; + MALFORMED_REPLY = 11; + CONNECTION_ERROR = 12; + } +} + +message URLFetchRequest { + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + PATCH = 6; + } + required RequestMethod Method = 1; + required string Url = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bytes Payload = 6 [ctype=CORD]; + + optional bool FollowRedirects = 7 [default=true]; + + optional double Deadline = 8; + + optional bool MustValidateServerCertificate = 9 [default=true]; +} + +message URLFetchResponse { + optional bytes Content = 1; + required int32 StatusCode = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bool ContentWasTruncated = 6 [default=false]; + optional int64 ExternalBytesSent = 7; + optional int64 ExternalBytesReceived = 8; + + optional string FinalUrl = 9; + + optional int64 ApiCpuMilliseconds = 10 [default=0]; + optional int64 ApiBytesSent = 11 [default=0]; + optional int64 ApiBytesReceived = 12 [default=0]; +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 
100644 index 00000000..6ffe1e6d --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. +package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. +// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. 
+func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. +func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. + switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/cloud/.travis.yml b/vendor/google.golang.org/cloud/.travis.yml deleted file mode 100644 index c037df0d..00000000 --- a/vendor/google.golang.org/cloud/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: false -language: go -go: -- 1.4 -- 1.5 -install: -- go get -v google.golang.org/cloud/... 
-script: -- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - go test -v -tags=integration google.golang.org/cloud/... diff --git a/vendor/google.golang.org/cloud/AUTHORS b/vendor/google.golang.org/cloud/AUTHORS deleted file mode 100644 index 3da443dc..00000000 --- a/vendor/google.golang.org/cloud/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization <email address> -# The email address is not required for organizations. - -Google Inc. -Palm Stone Games, Inc. -Péter Szilágyi -Tyler Treat diff --git a/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/google.golang.org/cloud/CONTRIBUTING.md deleted file mode 100644 index 9a1cab28..00000000 --- a/vendor/google.golang.org/cloud/CONTRIBUTING.md +++ /dev/null @@ -1,114 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. -1. Get the cloud package by running `go get -d google.golang.org/cloud`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: - - git remote set-url origin https://code.googlesource.com/gocloud -1. Make changes and create a change by running `git codereview change <name>`, -provide a commit message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending the change and mailing it as you receive feedback. - -## Integration Tests - -In addition to the unit tests, you may run the integration test suite. - -To run the integration tests, you must create and configure a project in the -Google Developers Console. Once you have created a project, set the -following environment variables to be able to run against the actual APIs. - -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - -Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. -The storage integration test will create and delete some objects in this bucket. - -Install the [gcloud command-line tool][gcloudcli] on your machine and use it -to create the indexes used in the datastore integration tests, as defined -in `datastore/testdata/index.yaml`: - -From the project's root directory: - -``` sh -# Install the app component -$ gcloud components update app - -# Set the default project in your env -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticate the gcloud tool with your account -$ gcloud auth login - -# Create the indexes -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml - -``` - -You can run the integration tests by running: - -``` sh -$ go test -v -tags=integration google.golang.org/cloud/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. 
-- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/google.golang.org/cloud/CONTRIBUTORS deleted file mode 100644 index 475ac6a6..00000000 --- a/vendor/google.golang.org/cloud/CONTRIBUTORS +++ /dev/null @@ -1,24 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. 
- -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Symonds -Glenn Lewis -Johan Euphrosine -Luna Duclos -Michael McGreevy -Péter Szilágyi -Tyler Treat diff --git a/vendor/google.golang.org/cloud/README.md b/vendor/google.golang.org/cloud/README.md deleted file mode 100644 index 10d3995d..00000000 --- a/vendor/google.golang.org/cloud/README.md +++ /dev/null @@ -1,135 +0,0 @@ -# Google Cloud for Go - -[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) - -**NOTE:** These packages are experimental, and may occasionally make -backwards-incompatible changes. - -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - -Go packages for Google Cloud Platform services. Supported APIs include: - - * Google Cloud Datastore - * Google Cloud Storage - * Google Cloud Pub/Sub - * Google Cloud Container Engine - -``` go -import "google.golang.org/cloud" -``` - -Documentation and examples are available at -[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud). - -## Authorization - -Authorization, throughout the package, is delegated to the godoc.org/golang.org/x/oauth2. -Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2) -for examples on using oauth2 with the Cloud package. - -## Google Cloud Datastore - -[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully -managed, schemaless database for storing non-relational data. Cloud Datastore -automatically scales with your users and supports ACID transactions, high availability -of reads and writes, strong consistency for reads and ancestor queries, and eventual -consistency for all other queries. - -Follow the [activation instructions][cloud-datastore-activation] to use the Google -Cloud Datastore API with your project. - -[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore) - - -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := datastore.PutMulti(ctx, keys, posts); err != nil { - log.Println(err) -} -``` - -## Google Cloud Storage - -[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store -data on Google infrastructure with very high reliability, performance and availability, -and can be used to distribute large data objects to users via direct download. - -[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage) - - -```go -// Read the object1 from bucket. -rc, err := storage.NewReader(ctx, "bucket", "object1") -if err != nil { - log.Fatal(err) -} -slurp, err := ioutil.ReadAll(rc) -rc.Close() -if err != nil { - log.Fatal(err) -} -``` - -## Google Cloud Pub/Sub (Alpha) - -> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in -> backward-incompatible ways and is not recommended for production use. It is not -> subject to any SLA or deprecation policy. 
- -[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect -your services with reliable, many-to-many, asynchronous messaging hosted on Google's -infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation -for building your own robust, global services. - -[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub) - - -```go -// Publish "hello world" on topic1. -msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ - Data: []byte("hello world"), -}) -if err != nil { - log.Println(err) -} -// Pull messages via subscription1. -msgs, err := pubsub.Pull(ctx, "subscription1", 1) -if err != nil { - log.Println(err) -} -``` - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews. Please don't open pull -requests against this repo, new pull requests will be automatically closed. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. - -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-datastore-docs]: https://cloud.google.com/datastore/docs -[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate - -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs - -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview -[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets diff --git a/vendor/google.golang.org/cloud/cloud.go b/vendor/google.golang.org/cloud/cloud.go index a634b055..96d36baf 100644 --- a/vendor/google.golang.org/cloud/cloud.go +++ b/vendor/google.golang.org/cloud/cloud.go @@ -14,7 +14,7 @@ // Package cloud contains Google Cloud Platform APIs related types // and common functions. -package cloud +package cloud // import "google.golang.org/cloud" import ( "net/http" diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/google.golang.org/cloud/compute/metadata/metadata.go index 3dd684e0..972972dd 100644 --- a/vendor/google.golang.org/cloud/compute/metadata/metadata.go +++ b/vendor/google.golang.org/cloud/compute/metadata/metadata.go @@ -17,7 +17,7 @@ // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. -package metadata +package metadata // import "google.golang.org/cloud/compute/metadata" import ( "encoding/json" diff --git a/vendor/google.golang.org/cloud/key.json.enc b/vendor/google.golang.org/cloud/key.json.enc deleted file mode 100644 index 2f673a84..00000000 Binary files a/vendor/google.golang.org/cloud/key.json.enc and /dev/null differ diff --git a/vendor/google.golang.org/cloud/storage/storage.go b/vendor/google.golang.org/cloud/storage/storage.go index bf22c6ae..8aa70ff4 100644 --- a/vendor/google.golang.org/cloud/storage/storage.go +++ b/vendor/google.golang.org/cloud/storage/storage.go @@ -15,7 +15,7 @@ // Package storage contains a Google Cloud Storage client. // // This package is experimental and may make backwards-incompatible changes. 
-package storage +package storage // import "google.golang.org/cloud/storage" import ( "crypto" diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 055d6641..00000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover - -install: - - mkdir -p "$GOPATH/src/google.golang.org" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc" - -script: - - make test testrace diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 407d384a..00000000 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to grpc! Here is some guideline -and information about how to do so. - -## Getting started - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -### Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 12e84e4e..00000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - -all: test testrace - -deps: - go get -d -v google.golang.org/grpc/... - -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -build: deps - go build google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go get -v github.com/golang/protobuf/protoc-gen-go - for file in $$(git ls-files '*.proto'); do \ - protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \ - done - -test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... - -testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... - -clean: - go clean google.golang.org/grpc/... - -coverage: testdeps - ./coverage.sh --coveralls diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index 37b05f09..00000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,32 +0,0 @@ -#gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) - -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. 
- -Installation ------------- - -To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get google.golang.org/grpc -``` - -Prerequisites -------------- - -This requires Go 1.4 or above. - -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - -Status ------- -Beta release - diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index b0094888..00000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 37c5b860..e14b464a 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -33,7 +33,7 @@ // Package codes defines the canonical error codes used by gRPC. It is // consistent across various languages. -package codes +package codes // import "google.golang.org/grpc/codes" // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100644 index 12023537..00000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) 
-show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 0b0b89b6..681f64e4 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -35,7 +35,7 @@ // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. -package credentials +package credentials // import "google.golang.org/grpc/credentials" import ( "crypto/tls" diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index b4c0e740..a35f2188 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -3,4 +3,4 @@ Package grpc implements an RPC system called gRPC. See www.grpc.io for more information about gRPC. */ -package grpc +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 2cc09be4..3b293307 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -34,7 +34,7 @@ /* Package grpclog defines logging for grpc. */ -package grpclog +package grpclog // import "google.golang.org/grpc/grpclog" import ( "log" diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 58469ddd..52070dbe 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -32,7 +32,7 @@ */ // Package metadata define the structure of the metadata supported by gRPC library. -package metadata +package metadata // import "google.golang.org/grpc/metadata" import ( "encoding/base64" diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index f027cae5..6eca1b3b 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -35,7 +35,7 @@ Package transport defines and implements message oriented communication channel to complete various transactions (e.g., an RPC). */ -package transport +package transport // import "google.golang.org/grpc/transport" import ( "bytes" diff --git a/vendor/gopkg.in/check.v1/.gitignore b/vendor/gopkg.in/check.v1/.gitignore deleted file mode 100644 index 191a5360..00000000 --- a/vendor/gopkg.in/check.v1/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -_* -*.swp -*.[568] -[568].out diff --git a/vendor/gopkg.in/check.v1/README.md b/vendor/gopkg.in/check.v1/README.md deleted file mode 100644 index 0ca9e572..00000000 --- a/vendor/gopkg.in/check.v1/README.md +++ /dev/null @@ -1,20 +0,0 @@ -Instructions -============ - -Install the package with: - - go get gopkg.in/check.v1 - -Import it with: - - import "gopkg.in/check.v1" - -and use _check_ as the package name inside the code. 
- -For more details, visit the project page: - -* http://labix.org/gocheck - -and the API documentation: - -* https://gopkg.in/check.v1 diff --git a/vendor/gopkg.in/check.v1/TODO b/vendor/gopkg.in/check.v1/TODO deleted file mode 100644 index 33498270..00000000 --- a/vendor/gopkg.in/check.v1/TODO +++ /dev/null @@ -1,2 +0,0 @@ -- Assert(slice, Contains, item) -- Parallel test support diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE b/vendor/gopkg.in/square/go-jose.v1/LICENSE similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/LICENSE rename to vendor/gopkg.in/square/go-jose.v1/LICENSE diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go b/vendor/gopkg.in/square/go-jose.v1/asymmetric.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric.go rename to vendor/gopkg.in/square/go-jose.v1/asymmetric.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go rename to vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go rename to vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go rename to vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go rename to vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go b/vendor/gopkg.in/square/go-jose.v1/crypter.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter.go rename to vendor/gopkg.in/square/go-jose.v1/crypter.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go b/vendor/gopkg.in/square/go-jose.v1/doc.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc.go rename to vendor/gopkg.in/square/go-jose.v1/doc.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go b/vendor/gopkg.in/square/go-jose.v1/encoding.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding.go rename to vendor/gopkg.in/square/go-jose.v1/encoding.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE b/vendor/gopkg.in/square/go-jose.v1/json/LICENSE similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/LICENSE rename to vendor/gopkg.in/square/go-jose.v1/json/LICENSE diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go 
b/vendor/gopkg.in/square/go-jose.v1/json/decode.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode.go rename to vendor/gopkg.in/square/go-jose.v1/json/decode.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go b/vendor/gopkg.in/square/go-jose.v1/json/encode.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode.go rename to vendor/gopkg.in/square/go-jose.v1/json/encode.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/indent.go b/vendor/gopkg.in/square/go-jose.v1/json/indent.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/indent.go rename to vendor/gopkg.in/square/go-jose.v1/json/indent.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner.go b/vendor/gopkg.in/square/go-jose.v1/json/scanner.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner.go rename to vendor/gopkg.in/square/go-jose.v1/json/scanner.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream.go b/vendor/gopkg.in/square/go-jose.v1/json/stream.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream.go rename to vendor/gopkg.in/square/go-jose.v1/json/stream.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags.go b/vendor/gopkg.in/square/go-jose.v1/json/tags.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags.go rename to vendor/gopkg.in/square/go-jose.v1/json/tags.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork.go b/vendor/gopkg.in/square/go-jose.v1/json_fork.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork.go rename to vendor/gopkg.in/square/go-jose.v1/json_fork.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std.go b/vendor/gopkg.in/square/go-jose.v1/json_std.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std.go rename to vendor/gopkg.in/square/go-jose.v1/json_std.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe.go b/vendor/gopkg.in/square/go-jose.v1/jwe.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe.go rename to vendor/gopkg.in/square/go-jose.v1/jwe.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk.go b/vendor/gopkg.in/square/go-jose.v1/jwk.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk.go rename to vendor/gopkg.in/square/go-jose.v1/jwk.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws.go b/vendor/gopkg.in/square/go-jose.v1/jws.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws.go rename to vendor/gopkg.in/square/go-jose.v1/jws.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/shared.go b/vendor/gopkg.in/square/go-jose.v1/shared.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/shared.go rename to vendor/gopkg.in/square/go-jose.v1/shared.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing.go 
b/vendor/gopkg.in/square/go-jose.v1/signing.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing.go rename to vendor/gopkg.in/square/go-jose.v1/signing.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric.go b/vendor/gopkg.in/square/go-jose.v1/symmetric.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric.go rename to vendor/gopkg.in/square/go-jose.v1/symmetric.go diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils.go b/vendor/gopkg.in/square/go-jose.v1/utils.go similarity index 100% rename from vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils.go rename to vendor/gopkg.in/square/go-jose.v1/utils.go diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml deleted file mode 100644 index 8da58fbf..00000000 --- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml +++ /dev/null @@ -1,31 +0,0 @@ -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original copyright and license: - - apic.go - emitterc.go - parserc.go - readerc.go - scannerc.go - writerc.go - yamlh.go - yamlprivateh.go - -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md deleted file mode 100644 index d6c919e6..00000000 --- a/vendor/gopkg.in/yaml.v2/README.md +++ /dev/null @@ -1,128 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.1 and 1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v2*. 
- -To install it, run: - - go get gopkg.in/yaml.v2 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) - -API stability -------------- - -The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v2" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -type T struct { - A string - B struct{C int; D []int ",flow"} -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/rsc.io/letsencrypt/README b/vendor/rsc.io/letsencrypt/README deleted file mode 100644 index 98a875f3..00000000 --- a/vendor/rsc.io/letsencrypt/README +++ /dev/null @@ -1,152 +0,0 @@ -package letsencrypt // import "rsc.io/letsencrypt" - -Package letsencrypt obtains TLS certificates from LetsEncrypt.org. - -LetsEncrypt.org is a service that issues free SSL/TLS certificates to -servers that can prove control over the given domain's DNS records or the -servers pointed at by those records. - - -Quick Start - -A complete HTTP/HTTPS web server using TLS certificates from -LetsEncrypt.org, redirecting all HTTP access to HTTPS, and maintaining TLS -certificates in a file letsencrypt.cache across server restarts. - - package main - - import ( - "fmt" - "log" - "net/http" - "rsc.io/letsencrypt" - ) - - func main() { - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, TLS!\n") - }) - var m letsencrypt.Manager - if err := m.CacheFile("letsencrypt.cache"); err != nil { - log.Fatal(err) - } - log.Fatal(m.Serve()) - } - - -Overview - -The fundamental type in this package is the Manager, which manages obtaining -and refreshing a collection of TLS certificates, typically for use by an -HTTPS server. The example above shows the most basic use of a Manager. The -use can be customized by calling additional methods of the Manager. - - -Registration - -A Manager m registers anonymously with LetsEncrypt.org, including agreeing -to the letsencrypt.org terms of service, the first time it needs to obtain a -certificate. To register with a particular email address and with the option -of a prompt for agreement with the terms of service, call m.Register. - - -GetCertificate - -The Manager's GetCertificate method returns certificates from the Manager's -cache, filling the cache by requesting certificates from LetsEncrypt.org. 
In -this way, a server with a tls.Config.GetCertificate set to m.GetCertificate -will demand load a certificate for any host name it serves. To force loading -of certificates ahead of time, install m.GetCertificate as before but then -call m.Cert for each host name. - -A Manager can only obtain a certificate for a given host name if it can -prove control of that host name to LetsEncrypt.org. By default it proves -control by answering an HTTPS-based challenge: when the LetsEncrypt.org -servers connect to the named host on port 443 (HTTPS), the TLS SNI handshake -must use m.GetCertificate to obtain a per-host certificate. The most common -way to satisfy this requirement is for the host name to resolve to the IP -address of a (single) computer running m.ServeHTTPS, or at least running a -Go TLS server with tls.Config.GetCertificate set to m.GetCertificate. -However, other configurations are possible. For example, a group of machines -could use an implementation of tls.Config.GetCertificate that cached -certificates but handled cache misses by making RPCs to a Manager m on an -elected leader machine. - -In typical usage, then, the setting of tls.Config.GetCertificate to -m.GetCertificate serves two purposes: it provides certificates to the TLS -server for ordinary serving, and it also answers challenges to prove -ownership of the domains in order to obtain those certificates. - -To force the loading of a certificate for a given host into the Manager's -cache, use m.Cert. - - -Persistent Storage - -If a server always starts with a zero Manager m, the server effectively -fetches a new certificate for each of its host name from LetsEncrypt.org on -each restart. This is unfortunate both because the server cannot start if -LetsEncrypt.org is unavailable and because LetsEncrypt.org limits how often -it will issue a certificate for a given host name (at time of writing, the -limit is 5 per week for a given host name). To save server state proactively -to a cache file and to reload the server state from that same file when -creating a new manager, call m.CacheFile with the name of the file to use. - -For alternate storage uses, m.Marshal returns the current state of the -Manager as an opaque string, m.Unmarshal sets the state of the Manager using -a string previously returned by m.Marshal (usually a different m), and -m.Watch returns a channel that receives notifications about state changes. - - -Limits - -To avoid hitting basic rate limits on LetsEncrypt.org, a given Manager -limits all its interactions to at most one request every minute, with an -initial allowed burst of 20 requests. - -By default, if GetCertificate is asked for a certificate it does not have, -it will in turn ask LetsEncrypt.org for that certificate. This opens a -potential attack where attackers connect to a server by IP address and -pretend to be asking for an incorrect host name. Then GetCertificate will -attempt to obtain a certificate for that host, incorrectly, eventually -hitting LetsEncrypt.org's rate limit for certificate requests and making it -impossible to obtain actual certificates. Because servers hold certificates -for months at a time, however, an attack would need to be sustained over a -time period of at least a month in order to cause real problems. - -To mitigate this kind of attack, a given Manager limits itself to an average -of one certificate request for a new host every three hours, with an initial -allowed burst of up to 20 requests. 
Long-running servers will therefore stay -within the LetsEncrypt.org limit of 300 failed requests per month. -Certificate refreshes are not subject to this limit. - -To eliminate the attack entirely, call m.SetHosts to enumerate the exact set -of hosts that are allowed in certificate requests. - - -Web Servers - -The basic requirement for use of a Manager is that there be an HTTPS server -running on port 443 and calling m.GetCertificate to obtain TLS certificates. -Using standard primitives, the way to do this is: - - srv := &http.Server{ - Addr: ":https", - TLSConfig: &tls.Config{ - GetCertificate: m.GetCertificate, - }, - } - srv.ListenAndServeTLS("", "") - -However, this pattern of serving HTTPS with demand-loaded TLS certificates -comes up enough to wrap into a single method m.ServeHTTPS. - -Similarly, many HTTPS servers prefer to redirect HTTP clients to the HTTPS -URLs. That functionality is provided by RedirectHTTP. - -The combination of serving HTTPS with demand-loaded TLS certificates and -serving HTTPS redirects to HTTP clients is provided by m.Serve, as used in -the original example above. - -func RedirectHTTP(w http.ResponseWriter, r *http.Request) -type Manager struct { ... } diff --git a/vendor/rsc.io/letsencrypt/lets.go b/vendor/rsc.io/letsencrypt/lets.go index 3a845363..2e5f81fc 100644 --- a/vendor/rsc.io/letsencrypt/lets.go +++ b/vendor/rsc.io/letsencrypt/lets.go @@ -8,6 +8,30 @@ // that can prove control over the given domain's DNS records or // the servers pointed at by those records. // +// Warning +// +// Like any other random code you find on the internet, this package should +// not be relied upon in important, production systems without thorough testing +// to ensure that it meets your needs. +// +// In the long term you should be using +// https://golang.org/x/crypto/acme/autocert instead of this package. +// Send improvements there, not here. +// +// This is a package that I wrote for my own personal web sites (swtch.com, rsc.io) +// in a hurry when my paid-for SSL certificate was expiring. It has no tests, +// has barely been used, and there is some anecdotal evidence that it does +// not properly renew certificates in a timely fashion, so servers that run for +// more than 3 months may run into trouble. +// I don't run this code anymore: to simplify maintenance, I moved the sites +// off of Ubuntu VMs and onto Google App Engine, configured with inexpensive +// long-term certificates purchased from cheapsslsecurity.com. +// +// This package was interesting primarily as an example of how simple the API +// for using LetsEncrypt.org could be made, in contrast to the low-level +// implementations that existed at the time. In that respect, it helped inform +// the design of the golang.org/x/crypto/acme/autocert package. +// // Quick Start // // A complete HTTP/HTTPS web server using TLS certificates from LetsEncrypt.org, @@ -428,7 +452,9 @@ func (m *Manager) register(email string, prompt func(string) bool) error { // Consequently, the state should be kept private. 
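For comparison with the Warning added above, which points long-term users at https://golang.org/x/crypto/acme/autocert: a rough, untested equivalent of the Quick Start server built on autocert might read as follows. The cache directory and host name are placeholders, and the mapping to CacheFile/SetHosts is approximate.

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "net/http"

        "golang.org/x/crypto/acme/autocert"
    )

    func main() {
        m := &autocert.Manager{
            Prompt:     autocert.AcceptTOS,
            Cache:      autocert.DirCache("letsencrypt-cache"), // rough analogue of m.CacheFile
            HostPolicy: autocert.HostWhitelist("example.com"),  // analogue of m.SetHosts
        }

        http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintf(w, "Hello, TLS!\n")
        })

        srv := &http.Server{
            Addr:      ":https",
            TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
        }
        // autocert also provides m.TLSConfig() as a convenience; see its
        // documentation for the details of challenge handling.
        log.Fatal(srv.ListenAndServeTLS("", ""))
    }
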
func (m *Manager) Marshal() string { m.init() + m.mu.Lock() js, err := json.MarshalIndent(&m.state, "", "\t") + m.mu.Unlock() if err != nil { panic("unexpected json.Marshal failure") } @@ -450,7 +476,9 @@ func (m *Manager) Unmarshal(enc string) error { } st.key = key } + m.mu.Lock() m.state = st + m.mu.Unlock() for host, cert := range m.state.Certs { c, err := cert.toTLS() if err != nil { @@ -700,7 +728,7 @@ type tlsProvider struct { } func (p tlsProvider) Present(domain, token, keyAuth string) error { - cert, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) + cert, dom, err := acme.TLSSNI01ChallengeCert(keyAuth) if err != nil { return err } @@ -713,7 +741,7 @@ func (p tlsProvider) Present(domain, token, keyAuth string) error { } func (p tlsProvider) CleanUp(domain, token, keyAuth string) error { - _, dom, err := acme.TLSSNI01ChallengeCertDomain(keyAuth) + _, dom, err := acme.TLSSNI01ChallengeCert(keyAuth) if err != nil { return err } diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go deleted file mode 100644 index e309554f..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/client_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package acme - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestNewClient(t *testing.T) { - keyBits := 32 // small value keeps test fast - keyType := RSA2048 - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - client, err := NewClient(ts.URL, user, keyType) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - - if client.jws == nil { - t.Fatalf("Expected client.jws to not be nil") - } - if expected, actual := key, client.jws.privKey; actual != expected { - t.Errorf("Expected jws.privKey to be %p but was %p", expected, actual) - } - - if client.keyType != keyType { - t.Errorf("Expected keyType to be %s but was %s", keyType, client.keyType) - } - - if expected, actual := 2, len(client.solvers); actual != expected { - t.Fatalf("Expected %d solver(s), got %d", expected, actual) - } -} - -func TestClientOptPort(t *testing.T) { - keyBits := 32 // small value keeps test fast - key, err := rsa.GenerateKey(rand.Reader, keyBits) - if err != nil { - t.Fatal("Could not generate test key:", err) - } - user := mockUser{ - email: "test@test.com", - regres: new(RegistrationResource), - privatekey: key, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - data, _ := json.Marshal(directory{NewAuthzURL: "http://test", NewCertURL: "http://test", NewRegURL: "http://test", RevokeCertURL: "http://test"}) - w.Write(data) - })) - - optPort := "1234" - optHost := "" - client, err := NewClient(ts.URL, user, RSA2048) - if err != nil { - t.Fatalf("Could not create client: %v", err) - } - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - httpSolver, ok := 
client.solvers[HTTP01].(*httpChallenge) - if !ok { - t.Fatal("Expected http-01 solver to be httpChallenge type") - } - if httpSolver.jws != client.jws { - t.Error("Expected http-01 to have same jws as client") - } - if got := httpSolver.provider.(*HTTPProviderServer).port; got != optPort { - t.Errorf("Expected http-01 to have port %s but was %s", optPort, got) - } - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - - httpsSolver, ok := client.solvers[TLSSNI01].(*tlsSNIChallenge) - if !ok { - t.Fatal("Expected tls-sni-01 solver to be httpChallenge type") - } - if httpsSolver.jws != client.jws { - t.Error("Expected tls-sni-01 to have same jws as client") - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).iface; got != optHost { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optHost, got) - } - - // test setting different host - optHost = "127.0.0.1" - client.SetHTTPAddress(net.JoinHostPort(optHost, optPort)) - client.SetTLSAddress(net.JoinHostPort(optHost, optPort)) - - if got := httpSolver.provider.(*HTTPProviderServer).iface; got != optHost { - t.Errorf("Expected http-01 to have iface %s but was %s", optHost, got) - } - if got := httpsSolver.provider.(*TLSProviderServer).port; got != optPort { - t.Errorf("Expected tls-sni-01 to have port %s but was %s", optPort, got) - } -} - -func TestValidate(t *testing.T) { - var statuses []string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Minimal stub ACME server for validation. - w.Header().Add("Replay-Nonce", "12345") - w.Header().Add("Retry-After", "0") - switch r.Method { - case "HEAD": - case "POST": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - case "GET": - st := statuses[0] - statuses = statuses[1:] - writeJSONResponse(w, &challenge{Type: "http-01", Status: st, URI: "http://example.com/", Token: "token"}) - - default: - http.Error(w, r.Method, http.StatusMethodNotAllowed) - } - })) - defer ts.Close() - - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey, directoryURL: ts.URL} - - tsts := []struct { - name string - statuses []string - want string - }{ - {"POST-unexpected", []string{"weird"}, "unexpected"}, - {"POST-valid", []string{"valid"}, ""}, - {"POST-invalid", []string{"invalid"}, "Error Detail"}, - {"GET-unexpected", []string{"pending", "weird"}, "unexpected"}, - {"GET-valid", []string{"pending", "valid"}, ""}, - {"GET-invalid", []string{"pending", "invalid"}, "Error Detail"}, - } - - for _, tst := range tsts { - statuses = tst.statuses - if err := validate(j, "example.com", ts.URL, challenge{Type: "http-01", Token: "token"}); err == nil && tst.want != "" { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } else if err != nil && !strings.Contains(err.Error(), tst.want) { - t.Errorf("[%s] validate: got error %v, want something with %q", tst.name, err, tst.want) - } - } -} - -// writeJSONResponse marshals the body as JSON and writes it to the response. 
-func writeJSONResponse(w http.ResponseWriter, body interface{}) { - bs, err := json.Marshal(body) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(bs); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - -// stubValidate is like validate, except it does nothing. -func stubValidate(j *jws, domain, uri string, chlng challenge) error { - return nil -} - -type mockUser struct { - email string - regres *RegistrationResource - privatekey *rsa.PrivateKey -} - -func (u mockUser) GetEmail() string { return u.email } -func (u mockUser) GetRegistration() *RegistrationResource { return u.regres } -func (u mockUser) GetPrivateKey() crypto.PrivateKey { return u.privatekey } diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go deleted file mode 100644 index d2fc5088..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/crypto_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package acme - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "testing" - "time" -) - -func TestGeneratePrivateKey(t *testing.T) { - key, err := generatePrivateKey(RSA2048) - if err != nil { - t.Error("Error generating private key:", err) - } - if key == nil { - t.Error("Expected key to not be nil, but it was") - } -} - -func TestGenerateCSR(t *testing.T) { - key, err := rsa.GenerateKey(rand.Reader, 512) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - csr, err := generateCsr(key, "fizz.buzz", nil) - if err != nil { - t.Error("Error generating CSR:", err) - } - if csr == nil || len(csr) == 0 { - t.Error("Expected CSR with data, but it was nil or length 0") - } -} - -func TestPEMEncode(t *testing.T) { - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - reader := MockRandReader{b: buf} - key, err := rsa.GenerateKey(reader, 32) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - data := pemEncode(key) - - if data == nil { - t.Fatal("Expected result to not be nil, but it was") - } - if len(data) != 127 { - t.Errorf("Expected PEM encoding to be length 127, but it was %d", len(data)) - } -} - -func TestPEMCertExpiration(t *testing.T) { - privKey, err := generatePrivateKey(RSA2048) - if err != nil { - t.Fatal("Error generating private key:", err) - } - - expiration := time.Now().Add(365) - expiration = expiration.Round(time.Second) - certBytes, err := generateDerCert(privKey.(*rsa.PrivateKey), expiration, "test.com") - if err != nil { - t.Fatal("Error generating cert:", err) - } - - buf := bytes.NewBufferString("TestingRSAIsSoMuchFun") - - // Some random string should return an error. - if ctime, err := GetPEMCertExpiration(buf.Bytes()); err == nil { - t.Errorf("Expected getCertExpiration to return an error for garbage string but returned %v", ctime) - } - - // A DER encoded certificate should return an error. - if _, err := GetPEMCertExpiration(certBytes); err == nil { - t.Errorf("Expected getCertExpiration to return an error for DER certificates but returned none.") - } - - // A PEM encoded certificate should work ok. - pemCert := pemEncode(derCertificateBytes(certBytes)) - if ctime, err := GetPEMCertExpiration(pemCert); err != nil || !ctime.Equal(expiration.UTC()) { - t.Errorf("Expected getCertExpiration to return %v but returned %v. 
Error: %v", expiration, ctime, err) - } -} - -type MockRandReader struct { - b *bytes.Buffer -} - -func (r MockRandReader) Read(p []byte) (int, error) { - return r.b.Read(p) -} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go deleted file mode 100644 index fdd8f4d2..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_challenge_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "io/ioutil" - "strings" - "testing" -) - -func TestHTTPChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - uri := "http://localhost:23457/.well-known/acme-challenge/" + chlng.Token - resp, err := httpGet(uri) - if err != nil { - return err - } - defer resp.Body.Close() - - if want := "text/plain"; resp.Header.Get("Content-Type") != want { - t.Errorf("Get(%q) Content-Type: got %q, want %q", uri, resp.Header.Get("Content-Type"), want) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - bodyStr := string(body) - - if bodyStr != chlng.KeyAuthorization { - t.Errorf("Get(%q) Body: got %q, want %q", uri, bodyStr, chlng.KeyAuthorization) - } - - return nil - } - solver := &httpChallenge{jws: j, validate: mockValidate, provider: &HTTPProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestHTTPChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: HTTP01, Token: "http2"} - solver := &httpChallenge{jws: j, validate: stubValidate, provider: &HTTPProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go deleted file mode 100644 index 33a48a33..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/http_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package acme - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestHTTPHeadUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - _, err := httpHead(ts.URL) - if err != nil { - t.Fatal(err) - } - - if method != "HEAD" { - t.Errorf("Expected method to be HEAD, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPGetUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpGet(ts.URL) - if err != nil { - t.Fatal(err) - } - 
res.Body.Close() - - if method != "GET" { - t.Errorf("Expected method to be GET, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestHTTPPostUserAgent(t *testing.T) { - var ua, method string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ua = r.Header.Get("User-Agent") - method = r.Method - })) - defer ts.Close() - - res, err := httpPost(ts.URL, "text/plain", strings.NewReader("falalalala")) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - - if method != "POST" { - t.Errorf("Expected method to be POST, got %s", method) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected User-Agent to contain '%s', got: '%s'", ourUserAgent, ua) - } -} - -func TestUserAgent(t *testing.T) { - ua := userAgent() - - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if strings.HasSuffix(ua, " ") { - t.Errorf("UA should not have trailing spaces; got '%s'", ua) - } - - // customize the UA by appending a value - UserAgent = "MyApp/1.2.3" - ua = userAgent() - if !strings.Contains(ua, defaultGoUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", defaultGoUserAgent, ua) - } - if !strings.Contains(ua, ourUserAgent) { - t.Errorf("Expected UA to contain %s, got '%s'", ourUserAgent, ua) - } - if !strings.Contains(ua, UserAgent) { - t.Errorf("Expected custom UA to contain %s, got '%s'", UserAgent, ua) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go deleted file mode 100644 index 3aec7456..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/tls_sni_challenge_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package acme - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "encoding/hex" - "fmt" - "strings" - "testing" -) - -func TestTLSSNIChallenge(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 512) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni1"} - mockValidate := func(_ *jws, _, _ string, chlng challenge) error { - conn, err := tls.Dial("tcp", "localhost:23457", &tls.Config{ - InsecureSkipVerify: true, - }) - if err != nil { - t.Errorf("Expected to connect to challenge server without an error. 
%s", err.Error()) - } - - // Expect the server to only return one certificate - connState := conn.ConnectionState() - if count := len(connState.PeerCertificates); count != 1 { - t.Errorf("Expected the challenge server to return exactly one certificate but got %d", count) - } - - remoteCert := connState.PeerCertificates[0] - if count := len(remoteCert.DNSNames); count != 1 { - t.Errorf("Expected the challenge certificate to have exactly one DNSNames entry but had %d", count) - } - - zBytes := sha256.Sum256([]byte(chlng.KeyAuthorization)) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.acme.invalid", z[:32], z[32:]) - - if remoteCert.DNSNames[0] != domain { - t.Errorf("Expected the challenge certificate DNSName to match %s but was %s", domain, remoteCert.DNSNames[0]) - } - - return nil - } - solver := &tlsSNIChallenge{jws: j, validate: mockValidate, provider: &TLSProviderServer{port: "23457"}} - - if err := solver.Solve(clientChallenge, "localhost:23457"); err != nil { - t.Errorf("Solve error: got %v, want nil", err) - } -} - -func TestTLSSNIChallengeInvalidPort(t *testing.T) { - privKey, _ := rsa.GenerateKey(rand.Reader, 128) - j := &jws{privKey: privKey} - clientChallenge := challenge{Type: TLSSNI01, Token: "tlssni2"} - solver := &tlsSNIChallenge{jws: j, validate: stubValidate, provider: &TLSProviderServer{port: "123456"}} - - if err := solver.Solve(clientChallenge, "localhost:123456"); err == nil { - t.Errorf("Solve error: got %v, want error", err) - } else if want := "invalid port 123456"; !strings.HasSuffix(err.Error(), want) { - t.Errorf("Solve error: got %q, want suffix %q", err.Error(), want) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go b/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go deleted file mode 100644 index 158af411..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package acme - -import ( - "testing" - "time" -) - -func TestWaitForTimeout(t *testing.T) { - c := make(chan error) - go func() { - err := WaitFor(3*time.Second, 1*time.Second, func() (bool, error) { - return false, nil - }) - c <- err - }() - - timeout := time.After(4 * time.Second) - select { - case <-timeout: - t.Fatal("timeout exceeded") - case err := <-c: - if err == nil { - t.Errorf("expected timeout error; got %v", err) - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md deleted file mode 100644 index 97e61dbb..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/BUG-BOUNTY.md +++ /dev/null @@ -1,10 +0,0 @@ -Serious about security -====================== - -Square recognizes the important contributions the security research community -can make. We therefore encourage reporting security issues with the code -contained in this repository. - -If you believe you have discovered a security vulnerability, please follow the -guidelines at . 
- diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md deleted file mode 100644 index 61b18365..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/CONTRIBUTING.md +++ /dev/null @@ -1,14 +0,0 @@ -# Contributing - -If you would like to contribute code to go-jose you can do so through GitHub by -forking the repository and sending a pull request. - -When submitting code, please make every effort to follow existing conventions -and style in order to keep the code as readable as possible. Please also make -sure all tests pass by running `go test`, and format your code with `go fmt`. -We also recommend using `golint` and `errcheck`. - -Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. - - [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md deleted file mode 100644 index fd859da7..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/README.md +++ /dev/null @@ -1,209 +0,0 @@ -# Go JOSE - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://travis-ci.org/square/go-jose.svg?branch=master)](https://travis-ci.org/square/go-jose) [![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=master)](https://coveralls.io/r/square/go-jose) - -Package jose aims to provide an implementation of the Javascript Object Signing -and Encryption set of standards. For the moment, it mainly focuses on encryption -and signing based on the JSON Web Encryption and JSON Web Signature standards. - -**Disclaimer**: This library contains encryption software that is subject to -the U.S. Export Administration Regulations. You may not export, re-export, -transfer or download this code or any part of it in violation of any United -States law, directive or regulation. In particular this software may not be -exported or re-exported in any form or on any media to Iran, North Sudan, -Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any -US maintained blocked list. - -## Overview - -The implementation follows the -[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) -standard (RFC 7516) and -[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) -standard (RFC 7515). Tables of supported algorithms are shown below. -The library supports both the compact and full serialization formats, and has -optional support for multiple recipients. It also comes with a small -command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/master/jose-util)) -for dealing with JOSE messages in a shell. - -**Note**: We use a forked version of the `encoding/json` package from the Go -standard library which uses case-sensitive matching for member names (instead -of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). -This is to avoid differences in interpretation of messages between go-jose and -libraries in other languages. 
If you do not like this behavior, you can use the -`std_json` build tag to disable it (though we do not recommend doing so). - -### Versions - -We use [gopkg.in](https://gopkg.in) for versioning. - -[Version 1](https://gopkg.in/square/go-jose.v1) is the current stable version: - - import "gopkg.in/square/go-jose.v1" - -The interface for [go-jose.v1](https://gopkg.in/square/go-jose.v1) will remain -backwards compatible. We're currently sketching out ideas for a new version, to -clean up the interface a bit. If you have ideas or feature requests [please let -us know](https://github.com/square/go-jose/issues/64)! - -### Supported algorithms - -See below for a table of supported algorithms. Algorithm identifiers match -the names in the -[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) -standard where possible. The -[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants) -has a list of constants. - - Key encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSA-PKCS#1v1.5 | RSA1_5 - RSA-OAEP | RSA-OAEP, RSA-OAEP-256 - AES key wrap | A128KW, A192KW, A256KW - AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW - ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW - ECDH-ES (direct) | ECDH-ES1 - Direct encryption | dir1 - -1. Not supported in multi-recipient mode - - Signing / MAC | Algorithm identifier(s) - :------------------------- | :------------------------------ - RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 - RSASSA-PSS | PS256, PS384, PS512 - HMAC | HS256, HS384, HS512 - ECDSA | ES256, ES384, ES512 - - Content encryption | Algorithm identifier(s) - :------------------------- | :------------------------------ - AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM - - Compression | Algorithm identifiers(s) - :------------------------- | ------------------------------- - DEFLATE (RFC 1951) | DEF - -### Supported key types - -See below for a table of supported key types. These are understood by the -library, and can be passed to corresponding functions such as `NewEncrypter` or -`NewSigner`. Note that if you are creating a new encrypter or signer with a -JsonWebKey, the key id of the JsonWebKey (if present) will be added to any -resulting messages. - - Algorithm(s) | Corresponding types - :------------------------- | ------------------------------- - RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey), *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - AES, HMAC | []byte, *[jose.JsonWebKey](https://godoc.org/github.com/square/go-jose#JsonWebKey) - -## Examples - -Encryption/decryption example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would -// indicate that the selected algorithm(s) are not currently supported. 
-publicKey := &privateKey.PublicKey -encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) -if err != nil { - panic(err) -} - -// Encrypt a sample plaintext. Calling the encrypter returns an encrypted -// JWE object, which can then be serialized for output afterwards. An error -// would indicate a problem in an underlying cryptographic primitive. -var plaintext = []byte("Lorem ipsum dolor sit amet") -object, err := encrypter.Encrypt(plaintext) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, encrypted JWE object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseEncrypted(serialized) -if err != nil { - panic(err) -} - -// Now we can decrypt and get back our original plaintext. An error here -// would indicate the the message failed to decrypt, e.g. because the auth -// tag was broken or the message was tampered with. -decrypted, err := object.Decrypt(privateKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(decrypted)) -// output: Lorem ipsum dolor sit amet -``` - -Signing/verification example using RSA: - -```Go -// Generate a public/private key pair to use for this example. The library -// also provides two utility functions (LoadPublicKey and LoadPrivateKey) -// that can be used to load keys from PEM/DER-encoded data. -privateKey, err := rsa.GenerateKey(rand.Reader, 2048) -if err != nil { - panic(err) -} - -// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. -signer, err := NewSigner(PS512, privateKey) -if err != nil { - panic(err) -} - -// Sign a sample payload. Calling the signer returns a protected JWS object, -// which can then be serialized for output afterwards. An error would -// indicate a problem in an underlying cryptographic primitive. -var payload = []byte("Lorem ipsum dolor sit amet") -object, err := signer.Sign(payload) -if err != nil { - panic(err) -} - -// Serialize the encrypted object using the full serialization format. -// Alternatively you can also use the compact format here by calling -// object.CompactSerialize() instead. -serialized := object.FullSerialize() - -// Parse the serialized, protected JWS object. An error would indicate that -// the given input did not represent a valid message. -object, err = ParseSigned(serialized) -if err != nil { - panic(err) -} - -// Now we can verify the signature on the payload. An error here would -// indicate the the message failed to verify, e.g. because the signature was -// broken or the message was tampered with. -output, err := object.Verify(&privateKey.PublicKey) -if err != nil { - panic(err) -} - -fmt.Printf(string(output)) -// output: Lorem ipsum dolor sit amet -``` - -More examples can be found in the [Godoc -reference](https://godoc.org/github.com/square/go-jose) for this package. The -[`jose-util`](https://github.com/square/go-jose/tree/master/jose-util) -subdirectory also contains a small command-line utility which might -be useful as an example. 
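Both examples above use RSA keys and the full serialization. As a supplementary, hedged sketch rather than part of the upstream README: per the key-type table, HMAC keys are plain `[]byte` values, and the comments above note that `object.CompactSerialize()` can be used in place of `FullSerialize()`. Combining the two, and assuming the same package context and elided imports as the examples above:

```Go
// Sign with a symmetric HMAC key; per the key-type table such keys are
// passed as plain []byte values.
key := []byte("0123456789abcdef0123456789abcdef")

signer, err := NewSigner(HS256, key)
if err != nil {
    panic(err)
}

object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
if err != nil {
    panic(err)
}

// Compact serialization: the familiar dot-separated JWS string.
compact, err := object.CompactSerialize()
if err != nil {
    panic(err)
}

// Parse the compact form and verify it with the same symmetric key.
parsed, err := ParseSigned(compact)
if err != nil {
    panic(err)
}

output, err := parsed.Verify(key)
if err != nil {
    panic(err)
}

fmt.Printf(string(output))
// output: Lorem ipsum dolor sit amet
```
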
diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go deleted file mode 100644 index 1c8c8b34..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/asymmetric_test.go +++ /dev/null @@ -1,431 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "errors" - "io" - "math/big" - "testing" -) - -func TestVectorsRSA(t *testing.T) { - // Sources: - // http://www.emc.com/emc-plus/rsa-labs/standards-initiatives/pkcs-rsa-cryptography-standard.htm - // ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: fromHexInt(` - a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 - ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c - bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd - bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 - ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), - E: 65537, - }, - D: fromHexInt(` - 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 - 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d - 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 - 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb - 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), - Primes: []*big.Int{ - fromHexInt(` - d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 - 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c - 2f26a471dcad212eac7ca39d`), - fromHexInt(` - cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 - 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af - 72bfe9a030e860b0288b5d77`), - }, - } - - input := fromHexBytes( - "6628194e12073db03ba94cda9ef9532397d50dba79b987004afefe34") - - expectedPKCS := fromHexBytes(` - 50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b808 - 04f169d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d - 8ea0e607ac58e2690ec4ebc10146e8cbaa5ed4d5cce6fe7b0ff9efc1eabb - 564dbf498285f449ee61dd7b42ee5b5892cb90601f30cda07bf26489310b - cd23b528ceab3c31`) - - expectedOAEP := fromHexBytes(` - 354fe67b4a126d5d35fe36c777791a3f7ba13def484e2d3908aff722fad4 - 68fb21696de95d0be911c2d3174f8afcc201035f7b6d8e69402de5451618 - c21a535fa9d7bfc5b8dd9fc243f8cf927db31322d6e881eaa91a996170e6 - 57a05a266426d98c88003f8477c1227094a0d9fa1e8c4024309ce1ecccb5 - 210035d47ac72e8a`) - - // Mock random reader - randReader = bytes.NewReader(fromHexBytes(` - 017341ae3875d5f87101f8cc4fa9b9bc156bb04628fccdb2f4f11e905bd3 - a155d376f593bd7304210874eba08a5e22bcccb4c9d3882a93a54db022f5 - 03d16338b6b7ce16dc7f4bbf9a96b59772d6606e9747c7649bf9e083db98 - 1884a954ab3c6f18b776ea21069d69776a33e96bad48e1dda0a5ef`)) - defer resetRandReader() - - // RSA-PKCS1v1.5 encrypt - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - encryptedPKCS, err := enc.encrypt(input, RSA1_5) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if 
bytes.Compare(encryptedPKCS, expectedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP encrypt - encryptedOAEP, err := enc.encrypt(input, RSA_OAEP) - if err != nil { - t.Error("Encryption failed:", err) - return - } - - if bytes.Compare(encryptedOAEP, expectedOAEP) != 0 { - t.Error("Output does not match expected value (OAEP)") - } - - // Need fake cipher for PKCS1v1.5 decrypt - resetRandReader() - aes := newAESGCM(len(input)) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - // RSA-PKCS1v1.5 decrypt - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - decryptedPKCS, err := dec.decrypt(encryptedPKCS, RSA1_5, keygen) - if err != nil { - t.Error("Decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedPKCS) != 0 { - t.Error("Output does not match expected value (PKCS1v1.5)") - } - - // RSA-OAEP decrypt - decryptedOAEP, err := dec.decrypt(encryptedOAEP, RSA_OAEP, keygen) - if err != nil { - t.Error("decryption failed:", err) - return - } - - if bytes.Compare(input, decryptedOAEP) != 0 { - t.Error("output does not match expected value (OAEP)") - } -} - -func TestInvalidAlgorithmsRSA(t *testing.T) { - _, err := newRSARecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newRSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &rsaTestKey.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - err = enc.verifyPayload([]byte{}, []byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - _, err = dec.decrypt(make([]byte, 256), "XYZ", randomKeyGenerator{size: 16}) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = dec.signPayload([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -type failingKeyGenerator struct{} - -func (ctx failingKeyGenerator) keySize() int { - return 0 -} - -func (ctx failingKeyGenerator) genKey() ([]byte, rawHeader, error) { - return nil, rawHeader{}, errors.New("failed to generate key") -} - -func TestPKCSKeyGeneratorFailure(t *testing.T) { - dec := new(rsaDecrypterSigner) - dec.privateKey = rsaTestKey - generator := failingKeyGenerator{} - _, err := dec.decrypt(make([]byte, 256), RSA1_5, generator) - if err != ErrCryptoFailure { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidAlgorithmsEC(t *testing.T) { - _, err := newECDHRecipient("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - _, err = newECDSASigner("XYZ", nil) - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } - - enc := new(ecEncrypterVerifier) - enc.publicKey = &ecTestKey256.PublicKey - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should return error on invalid algorithm") - } -} - -func TestInvalidECKeyGen(t *testing.T) { - gen := ecKeyGenerator{ - size: 16, - algID: "A128GCM", - publicKey: &ecTestKey256.PublicKey, - } - - if gen.keySize() != 16 { - t.Error("ec key generator reported incorrect key size") - } - - _, _, err 
:= gen.genKey() - if err != nil { - t.Error("ec key generator failed to generate key", err) - } -} - -func TestInvalidECDecrypt(t *testing.T) { - dec := ecDecrypterSigner{ - privateKey: ecTestKey256, - } - - generator := randomKeyGenerator{size: 16} - - // Missing epk header - headers := rawHeader{ - Alg: string(ECDH_ES), - } - - _, err := dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with missing epk header") - } - - // Invalid epk header - headers.Epk = &JsonWebKey{} - - _, err = dec.decryptKey(headers, nil, generator) - if err == nil { - t.Error("ec decrypter accepted object with invalid epk header") - } -} - -func TestDecryptWithIncorrectSize(t *testing.T) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - payload := make([]byte, 254) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } - - payload = make([]byte, 257) - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err == nil { - t.Error("Invalid payload size should return error") - } -} - -func TestPKCSDecryptNeverFails(t *testing.T) { - // We don't want RSA-PKCS1 v1.5 decryption to ever fail, in order to prevent - // side-channel timing attacks (Bleichenbacher attack in particular). - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Error(err) - return - } - - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - for i := 1; i < 50; i++ { - payload := make([]byte, 256) - _, err := io.ReadFull(rand.Reader, payload) - if err != nil { - t.Error("Unable to get random data:", err) - return - } - _, err = dec.decrypt(payload, RSA1_5, keygen) - if err != nil { - t.Error("PKCS1v1.5 decrypt should never fail:", err) - return - } - } -} - -func BenchmarkPKCSDecryptWithValidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(32) - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 32) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func BenchmarkPKCSDecryptWithInvalidPayloads(b *testing.B) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - enc := new(rsaEncrypterVerifier) - enc.publicKey = &priv.PublicKey - dec := new(rsaDecrypterSigner) - dec.privateKey = priv - aes := newAESGCM(16) - - keygen := randomKeyGenerator{ - size: aes.keySize(), - } - - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - plaintext := make([]byte, 16) - _, err = io.ReadFull(rand.Reader, plaintext) - if err != nil { - panic(err) - } - - ciphertext, err := enc.encrypt(plaintext, RSA1_5) - if err != nil { - panic(err) - } - - // Do some simple scrambling - ciphertext[128] ^= 0xFF - - b.StartTimer() - _, err = dec.decrypt(ciphertext, RSA1_5, keygen) - 
b.StopTimer() - if err != nil { - panic(err) - } - } -} - -func TestInvalidEllipticCurve(t *testing.T) { - signer256 := ecDecrypterSigner{privateKey: ecTestKey256} - signer384 := ecDecrypterSigner{privateKey: ecTestKey384} - signer521 := ecDecrypterSigner{privateKey: ecTestKey521} - - _, err := signer256.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-256 key") - } - _, err = signer256.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-256 key") - } - _, err = signer384.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-384 key") - } - _, err = signer384.signPayload([]byte{}, ES512) - if err == nil { - t.Error("should not generate ES512 signature with P-384 key") - } - _, err = signer521.signPayload([]byte{}, ES256) - if err == nil { - t.Error("should not generate ES256 signature with P-521 key") - } - _, err = signer521.signPayload([]byte{}, ES384) - if err == nil { - t.Error("should not generate ES384 signature with P-521 key") - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go deleted file mode 100644 index c230271b..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac_test.go +++ /dev/null @@ -1,498 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "io" - "strings" - "testing" -) - -func TestInvalidInputs(t *testing.T) { - key := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - nonce := []byte{ - 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ciphertext := aead.Seal(nil, nonce, []byte("plaintext"), []byte("aad")) - - // Changed AAD, must fail - _, err := aead.Open(nil, nonce, ciphertext, []byte("INVALID")) - if err == nil { - t.Error("must detect invalid aad") - } - - // Empty ciphertext, must fail - _, err = aead.Open(nil, nonce, []byte{}, []byte("aad")) - if err == nil { - t.Error("must detect invalid/empty ciphertext") - } - - // Corrupt ciphertext, must fail - corrupt := make([]byte, len(ciphertext)) - copy(corrupt, ciphertext) - corrupt[0] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt ciphertext") - } - - // Corrupt authtag, must fail - copy(corrupt, ciphertext) - corrupt[len(ciphertext)-1] ^= 0xFF - - _, err = aead.Open(nil, nonce, corrupt, []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } - - // Truncated data, must fail - _, err = aead.Open(nil, nonce, ciphertext[:10], []byte("aad")) - if err == nil { - t.Error("must detect corrupt authtag") - } -} - -func TestVectorsAESCBC128(t *testing.T) { - // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.2 - plaintext := []byte{ - 76, 105, 118, 101, 32, 108, 111, 110, 103, 32, 97, 110, 100, 32, - 112, 114, 111, 115, 112, 101, 114, 46} - - aad := []byte{ - 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, - 120, 88, 122, 85, 105, 76, 67, 74, 108, 98, 109, 77, 105, 79, 105, - 74, 66, 77, 84, 73, 52, 81, 48, 74, 68, 76, 85, 104, 84, 77, 106, 85, - 50, 73, 110, 48} - - expectedCiphertext := []byte{ - 40, 57, 83, 181, 119, 33, 133, 148, 198, 185, 243, 24, 152, 230, 6, - 75, 129, 223, 127, 19, 210, 82, 183, 230, 168, 33, 215, 104, 143, - 112, 56, 102} - - expectedAuthtag := []byte{ - 246, 17, 244, 190, 4, 95, 98, 3, 231, 0, 115, 157, 242, 203, 100, - 191} - - key := []byte{ - 4, 211, 31, 197, 84, 157, 252, 254, 11, 100, 157, 250, 63, 170, 106, 206, - 107, 124, 212, 45, 111, 107, 9, 219, 200, 177, 0, 240, 143, 156, 44, 207} - - nonce := []byte{ - 3, 22, 60, 12, 43, 67, 104, 105, 108, 108, 105, 99, 111, 116, 104, 101} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-16], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match") - } - if bytes.Compare(out[len(out)-16:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match") - } -} - -func TestVectorsAESCBC256(t *testing.T) { - // Source: https://tools.ietf.org/html/draft-mcgrew-aead-aes-cbc-hmac-sha2-05#section-5.4 - plaintext := []byte{ - 0x41, 0x20, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, - 0x6d, 0x75, 0x73, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, - 0x65, 0x20, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x66, 
0x61, 0x6c, 0x6c, 0x20, 0x69, - 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x65, 0x6d, 0x79, 0x20, 0x77, 0x69, 0x74, 0x68, 0x6f, - 0x75, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x6e, 0x69, 0x65, 0x6e, 0x63, 0x65} - - aad := []byte{ - 0x54, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x63, - 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x41, 0x75, 0x67, 0x75, 0x73, 0x74, 0x65, 0x20, - 0x4b, 0x65, 0x72, 0x63, 0x6b, 0x68, 0x6f, 0x66, 0x66, 0x73} - - expectedCiphertext := []byte{ - 0x4a, 0xff, 0xaa, 0xad, 0xb7, 0x8c, 0x31, 0xc5, 0xda, 0x4b, 0x1b, 0x59, 0x0d, 0x10, 0xff, 0xbd, - 0x3d, 0xd8, 0xd5, 0xd3, 0x02, 0x42, 0x35, 0x26, 0x91, 0x2d, 0xa0, 0x37, 0xec, 0xbc, 0xc7, 0xbd, - 0x82, 0x2c, 0x30, 0x1d, 0xd6, 0x7c, 0x37, 0x3b, 0xcc, 0xb5, 0x84, 0xad, 0x3e, 0x92, 0x79, 0xc2, - 0xe6, 0xd1, 0x2a, 0x13, 0x74, 0xb7, 0x7f, 0x07, 0x75, 0x53, 0xdf, 0x82, 0x94, 0x10, 0x44, 0x6b, - 0x36, 0xeb, 0xd9, 0x70, 0x66, 0x29, 0x6a, 0xe6, 0x42, 0x7e, 0xa7, 0x5c, 0x2e, 0x08, 0x46, 0xa1, - 0x1a, 0x09, 0xcc, 0xf5, 0x37, 0x0d, 0xc8, 0x0b, 0xfe, 0xcb, 0xad, 0x28, 0xc7, 0x3f, 0x09, 0xb3, - 0xa3, 0xb7, 0x5e, 0x66, 0x2a, 0x25, 0x94, 0x41, 0x0a, 0xe4, 0x96, 0xb2, 0xe2, 0xe6, 0x60, 0x9e, - 0x31, 0xe6, 0xe0, 0x2c, 0xc8, 0x37, 0xf0, 0x53, 0xd2, 0x1f, 0x37, 0xff, 0x4f, 0x51, 0x95, 0x0b, - 0xbe, 0x26, 0x38, 0xd0, 0x9d, 0xd7, 0xa4, 0x93, 0x09, 0x30, 0x80, 0x6d, 0x07, 0x03, 0xb1, 0xf6} - - expectedAuthtag := []byte{ - 0x4d, 0xd3, 0xb4, 0xc0, 0x88, 0xa7, 0xf4, 0x5c, 0x21, 0x68, 0x39, 0x64, 0x5b, 0x20, 0x12, 0xbf, - 0x2e, 0x62, 0x69, 0xa8, 0xc5, 0x6a, 0x81, 0x6d, 0xbc, 0x1b, 0x26, 0x77, 0x61, 0x95, 0x5b, 0xc5} - - key := []byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f} - - nonce := []byte{ - 0x1a, 0xf3, 0x8c, 0x2d, 0xc2, 0xb9, 0x6f, 0xfd, 0xd8, 0x66, 0x94, 0x09, 0x23, 0x41, 0xbc, 0x04} - - enc, err := NewCBCHMAC(key, aes.NewCipher) - out := enc.Seal(nil, nonce, plaintext, aad) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out[:len(out)-32], expectedCiphertext) != 0 { - t.Error("Ciphertext did not match, got", out[:len(out)-32], "wanted", expectedCiphertext) - } - if bytes.Compare(out[len(out)-32:], expectedAuthtag) != 0 { - t.Error("Auth tag did not match, got", out[len(out)-32:], "wanted", expectedAuthtag) - } -} - -func TestAESCBCRoundtrip(t *testing.T) { - key128 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - key192 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7} - - key256 := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - nonce := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - - RunRoundtrip(t, key128, nonce) - RunRoundtrip(t, key192, nonce) - RunRoundtrip(t, key256, nonce) -} - -func 
RunRoundtrip(t *testing.T, key, nonce []byte) { - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.NonceSize() != len(nonce) { - panic("invalid nonce") - } - - // Test pre-existing data in dst buffer - dst := []byte{15, 15, 15, 15} - plaintext := []byte{0, 0, 0, 0} - aad := []byte{4, 3, 2, 1} - - result := aead.Seal(dst, nonce, plaintext, aad) - if bytes.Compare(dst, result[:4]) != 0 { - t.Error("Existing data in dst not preserved") - } - - // Test pre-existing (empty) dst buffer with sufficient capacity - dst = make([]byte, 256)[:0] - result, err = aead.Open(dst, nonce, result[4:], aad) - if err != nil { - panic(err) - } - - if bytes.Compare(result, plaintext) != 0 { - t.Error("Plaintext does not match output") - } -} - -func TestAESCBCOverhead(t *testing.T) { - aead, err := NewCBCHMAC(make([]byte, 32), aes.NewCipher) - if err != nil { - panic(err) - } - - if aead.Overhead() != 32 { - t.Error("CBC-HMAC reports incorrect overhead value") - } -} - -func TestPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - unpadded, err := unpadBuffer(padded, 16) - if err != nil || len(unpadded) != i { - t.Error("failed to unpad slice properly", i) - return - } - } -} - -func TestInvalidKey(t *testing.T) { - key := make([]byte, 30) - _, err := NewCBCHMAC(key, aes.NewCipher) - if err == nil { - t.Error("should not be able to instantiate CBC-HMAC with invalid key") - } -} - -func TestTruncatedCiphertext(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - data := make([]byte, 32) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - ctx := aead.(*cbcAEAD) - ct := aead.Seal(nil, nonce, data, nil) - - // Truncated ciphertext, but with correct auth tag - truncated, tail := resize(ct[:len(ct)-ctx.authtagBytes-2], len(ct)-2) - copy(tail, ctx.computeAuthTag(nil, nonce, truncated[:len(truncated)-ctx.authtagBytes])) - - // Open should fail - _, err = aead.Open(nil, nonce, truncated, nil) - if err == nil { - t.Error("open on truncated ciphertext should fail") - } -} - -func TestInvalidPaddingOpen(t *testing.T) { - key := make([]byte, 32) - nonce := make([]byte, 16) - - // Plaintext with invalid padding - plaintext := padBuffer(make([]byte, 28), aes.BlockSize) - plaintext[len(plaintext)-1] = 0xFF - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - block, _ := aes.NewCipher(key) - cbc := cipher.NewCBCEncrypter(block, nonce) - buffer := append([]byte{}, plaintext...) 
- cbc.CryptBlocks(buffer, buffer) - - aead, _ := NewCBCHMAC(key, aes.NewCipher) - ctx := aead.(*cbcAEAD) - - // Mutated ciphertext, but with correct auth tag - size := len(buffer) - ciphertext, tail := resize(buffer, size+(len(key)/2)) - copy(tail, ctx.computeAuthTag(nil, nonce, ciphertext[:size])) - - // Open should fail (b/c of invalid padding, even though tag matches) - _, err := aead.Open(nil, nonce, ciphertext, nil) - if err == nil || !strings.Contains(err.Error(), "invalid padding") { - t.Error("no or unexpected error on open with invalid padding:", err) - } -} - -func TestInvalidPadding(t *testing.T) { - for i := 0; i < 256; i++ { - slice := make([]byte, i) - padded := padBuffer(slice, 16) - if len(padded)%16 != 0 { - t.Error("failed to pad slice properly", i) - return - } - - paddingBytes := 16 - (i % 16) - - // Mutate padding for testing - for j := 1; j <= paddingBytes; j++ { - mutated := make([]byte, len(padded)) - copy(mutated, padded) - mutated[len(mutated)-j] ^= 0xFF - - _, err := unpadBuffer(mutated, 16) - if err == nil { - t.Error("unpad on invalid padding should fail", i) - return - } - } - - // Test truncated padding - _, err := unpadBuffer(padded[:len(padded)-1], 16) - if err == nil { - t.Error("unpad on truncated padding should fail", i) - return - } - } -} - -func TestZeroLengthPadding(t *testing.T) { - data := make([]byte, 16) - data, err := unpadBuffer(data, 16) - if err == nil { - t.Error("padding with 0x00 should never be valid") - } -} - -func benchEncryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Seal(nil, nonce, chunk, nil) - } -} - -func benchDecryptCBCHMAC(b *testing.B, keySize, chunkSize int) { - key := make([]byte, keySize*2) - nonce := make([]byte, 16) - - io.ReadFull(rand.Reader, key) - io.ReadFull(rand.Reader, nonce) - - chunk := make([]byte, chunkSize) - - aead, err := NewCBCHMAC(key, aes.NewCipher) - if err != nil { - panic(err) - } - - out := aead.Seal(nil, nonce, chunk, nil) - - b.SetBytes(int64(chunkSize)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - aead.Open(nil, nonce, out, nil) - } -} - -func BenchmarkEncryptAES128_CBCHMAC_1k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkEncryptAES128_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkEncryptAES128_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkEncryptAES128_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkDecryptAES128_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1024) -} - -func BenchmarkDecryptAES128_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 65536) -} - -func BenchmarkDecryptAES128_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 1048576) -} - -func BenchmarkDecryptAES128_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 16, 67108864) -} - -func BenchmarkEncryptAES192_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkEncryptAES192_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkEncryptAES192_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkDecryptAES192_CBCHMAC_1k(b *testing.B) 
{ - benchDecryptCBCHMAC(b, 24, 1024) -} - -func BenchmarkDecryptAES192_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 65536) -} - -func BenchmarkDecryptAES192_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 1048576) -} - -func BenchmarkDecryptAES192_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 24, 67108864) -} - -func BenchmarkEncryptAES256_CBCHMAC_64k(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkEncryptAES256_CBCHMAC_1MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkEncryptAES256_CBCHMAC_64MB(b *testing.B) { - benchEncryptCBCHMAC(b, 32, 67108864) -} - -func BenchmarkDecryptAES256_CBCHMAC_1k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1032) -} - -func BenchmarkDecryptAES256_CBCHMAC_64k(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 65536) -} - -func BenchmarkDecryptAES256_CBCHMAC_1MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 1048576) -} - -func BenchmarkDecryptAES256_CBCHMAC_64MB(b *testing.B) { - benchDecryptCBCHMAC(b, 32, 67108864) -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go deleted file mode 100644 index 48219b3e..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package josecipher - -import ( - "bytes" - "crypto" - "testing" -) - -// Taken from: https://tools.ietf.org/id/draft-ietf-jose-json-web-algorithms-38.txt -func TestVectorConcatKDF(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{0, 0, 0, 7, 65, 49, 50, 56, 71, 67, 77} - - ptyUInfo := []byte{0, 0, 0, 5, 65, 108, 105, 99, 101} - ptyVInfo := []byte{0, 0, 0, 3, 66, 111, 98} - - supPubInfo := []byte{0, 0, 0, 128} - supPrivInfo := []byte{} - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - ckdf := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - out0 := make([]byte, 9) - out1 := make([]byte, 7) - - read0, err := ckdf.Read(out0) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - read1, err := ckdf.Read(out1) - if err != nil { - t.Error("error when reading from concat kdf reader", err) - return - } - - if read0+read1 != len(out0)+len(out1) { - t.Error("did not receive enough bytes from concat kdf reader") - return - } - - out := []byte{} - out = append(out, out0...) - out = append(out, out1...) 
- - if bytes.Compare(out, expected) != 0 { - t.Error("did not receive expected output from concat kdf reader") - return - } -} - -func TestCache(t *testing.T) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - outputs := [][]byte{} - - // Read the same amount of data in different chunk sizes - chunkSizes := []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512} - - for _, c := range chunkSizes { - out := make([]byte, 1024) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - for i := 0; i < 1024; i += c { - _, _ = reader.Read(out[i : i+c]) - } - - outputs = append(outputs, out) - } - - for i := range outputs { - if bytes.Compare(outputs[i], outputs[(i+1)%len(outputs)]) != 0 { - t.Error("not all outputs from KDF matched") - } - } -} - -func benchmarkKDF(b *testing.B, total int) { - z := []byte{ - 158, 86, 217, 29, 129, 113, 53, 211, 114, 131, 66, 131, 191, 132, - 38, 156, 251, 49, 110, 163, 218, 128, 106, 72, 246, 218, 167, 121, - 140, 254, 144, 196} - - algID := []byte{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4} - - ptyUInfo := []byte{1, 2, 3, 4} - ptyVInfo := []byte{4, 3, 2, 1} - - supPubInfo := []byte{} - supPrivInfo := []byte{} - - out := make([]byte, total) - reader := NewConcatKDF(crypto.SHA256, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo) - - b.ResetTimer() - b.SetBytes(int64(total)) - for i := 0; i < b.N; i++ { - _, _ = reader.Read(out) - } -} - -func BenchmarkConcatKDF_1k(b *testing.B) { - benchmarkKDF(b, 1024) -} - -func BenchmarkConcatKDF_64k(b *testing.B) { - benchmarkKDF(b, 65536) -} - -func BenchmarkConcatKDF_1MB(b *testing.B) { - benchmarkKDF(b, 1048576) -} - -func BenchmarkConcatKDF_64MB(b *testing.B) { - benchmarkKDF(b, 67108864) -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go deleted file mode 100644 index f92abb17..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es_test.go +++ /dev/null @@ -1,98 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/base64" - "math/big" - "testing" -) - -// Example keys from JWA, Appendix C -var aliceKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("gI0GAILBdu7T53akrFmMyGcsF3n5dO7MmwNBHKW5SV0="), - Y: fromBase64Int("SLW_xSffzlPWrHEVI30DHM_4egVwt3NQqeUD7nMFpps="), - }, - D: fromBase64Int("0_NxaRPUMQoAJt50Gz8YiTr8gRTwyEaCumd-MToTmIo="), -} - -var bobKey = &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ="), - Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck="), - }, - D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw="), -} - -// Build big int from base64-encoded string. Strips whitespace (for testing). -func fromBase64Int(data string) *big.Int { - val, err := base64.URLEncoding.DecodeString(data) - if err != nil { - panic("Invalid test data") - } - return new(big.Int).SetBytes(val) -} - -func TestVectorECDHES(t *testing.T) { - apuData := []byte("Alice") - apvData := []byte("Bob") - - expected := []byte{ - 86, 170, 141, 234, 248, 35, 109, 32, 92, 34, 40, 205, 113, 167, 16, 26} - - output := DeriveECDHES("A128GCM", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - - if bytes.Compare(output, expected) != 0 { - t.Error("output did not match what we expect, got", output, "wanted", expected) - } -} - -func BenchmarkECDHES_128(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 16) - } -} - -func BenchmarkECDHES_192(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 24) - } -} - -func BenchmarkECDHES_256(b *testing.B) { - apuData := []byte("APU") - apvData := []byte("APV") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - DeriveECDHES("ID", apuData, apvData, bobKey, &aliceKey.PublicKey, 32) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go deleted file mode 100644 index ceecf812..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap_test.go +++ /dev/null @@ -1,133 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package josecipher - -import ( - "bytes" - "crypto/aes" - "encoding/hex" - "testing" -) - -func TestAesKeyWrap(t *testing.T) { - // Test vectors from: http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf - kek0, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - cek0, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected0, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - kek1, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F1011121314151617") - cek1, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF") - - expected1, _ := hex.DecodeString("96778B25AE6CA435F92B5B97C050AED2468AB8A17AD84E5D") - - kek2, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F") - cek2, _ := hex.DecodeString("00112233445566778899AABBCCDDEEFF0001020304050607") - - expected2, _ := hex.DecodeString("A8F9BC1612C68B3FF6E6F4FBE30E71E4769C8B80A32CB8958CD5D17D6B254DA1") - - block0, _ := aes.NewCipher(kek0) - block1, _ := aes.NewCipher(kek1) - block2, _ := aes.NewCipher(kek2) - - out0, _ := KeyWrap(block0, cek0) - out1, _ := KeyWrap(block1, cek1) - out2, _ := KeyWrap(block2, cek2) - - if bytes.Compare(out0, expected0) != 0 { - t.Error("output 0 not as expected, got", out0, "wanted", expected0) - } - - if bytes.Compare(out1, expected1) != 0 { - t.Error("output 1 not as expected, got", out1, "wanted", expected1) - } - - if bytes.Compare(out2, expected2) != 0 { - t.Error("output 2 not as expected, got", out2, "wanted", expected2) - } - - unwrap0, _ := KeyUnwrap(block0, out0) - unwrap1, _ := KeyUnwrap(block1, out1) - unwrap2, _ := KeyUnwrap(block2, out2) - - if bytes.Compare(unwrap0, cek0) != 0 { - t.Error("key unwrap did not return original input, got", unwrap0, "wanted", cek0) - } - - if bytes.Compare(unwrap1, cek1) != 0 { - t.Error("key unwrap did not return original input, got", unwrap1, "wanted", cek1) - } - - if bytes.Compare(unwrap2, cek2) != 0 { - t.Error("key unwrap did not return original input, got", unwrap2, "wanted", cek2) - } -} - -func TestAesKeyWrapInvalid(t *testing.T) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - - // Invalid unwrap input (bit flipped) - input0, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - _, err := KeyUnwrap(block, input0) - if err == nil { - t.Error("key unwrap failed to detect invalid input") - } - - // Invalid unwrap input (truncated) - input1, _ := hex.DecodeString("1EA68C1A8112B447AEF34BD8FB5A7B828D3E862371D2CF") - - _, err = KeyUnwrap(block, input1) - if err == nil { - t.Error("key unwrap failed to detect truncated input") - } - - // Invalid wrap input (not multiple of 8) - input2, _ := hex.DecodeString("0123456789ABCD") - - _, err = KeyWrap(block, input2) - if err == nil { - t.Error("key wrap accepted invalid input") - } - -} - -func BenchmarkAesKeyWrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - key, _ := hex.DecodeString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyWrap(block, key) - } -} - -func BenchmarkAesKeyUnwrap(b *testing.B) { - kek, _ := hex.DecodeString("000102030405060708090A0B0C0D0E0F") - input, _ := hex.DecodeString("1FA68B0A8112B447AEF34BD8FB5A7B829D3E862371D2CFE5") - - block, _ := aes.NewCipher(kek) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - KeyUnwrap(block, input) - } -} diff --git 
a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go deleted file mode 100644 index 86b8fc0a..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/crypter_test.go +++ /dev/null @@ -1,784 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "fmt" - "io" - "testing" -) - -// We generate only a single RSA and EC key for testing, speeds up tests. -var rsaTestKey, _ = rsa.GenerateKey(rand.Reader, 2048) - -var ecTestKey256, _ = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) -var ecTestKey384, _ = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) -var ecTestKey521, _ = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - -func RoundtripJWE(keyAlg KeyAlgorithm, encAlg ContentEncryption, compressionAlg CompressionAlgorithm, serializer func(*JsonWebEncryption) (string, error), corrupter func(*JsonWebEncryption) bool, aad []byte, encryptionKey interface{}, decryptionKey interface{}) error { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - return fmt.Errorf("error on new encrypter: %s", err) - } - - enc.SetCompression(compressionAlg) - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.EncryptWithAuthData(input, aad) - if err != nil { - return fmt.Errorf("error in encrypt: %s", err) - } - - msg, err := serializer(obj) - if err != nil { - return fmt.Errorf("error in serializer: %s", err) - } - - parsed, err := ParseEncrypted(msg) - if err != nil { - return fmt.Errorf("error in parse: %s, on msg '%s'", err, msg) - } - - // (Maybe) mangle object - skip := corrupter(parsed) - if skip { - return fmt.Errorf("corrupter indicated message should be skipped") - } - - if bytes.Compare(parsed.GetAuthData(), aad) != 0 { - return fmt.Errorf("auth data in parsed object does not match") - } - - output, err := parsed.Decrypt(decryptionKey) - if err != nil { - return fmt.Errorf("error on decrypt: %s", err) - } - - if bytes.Compare(input, output) != 0 { - return fmt.Errorf("Decrypted output does not match input, got '%s' but wanted '%s'", output, input) - } - - return nil -} - -func TestRoundtripsJWE(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{ - DIRECT, ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW, A128KW, A192KW, A256KW, - RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW, A192GCMKW, A256GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - // Note: can't use AAD with 
compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err != nil { - t.Error(err, alg, enc, zip, i) - } - } - } - } - } - } -} - -func TestRoundtripsJWECorrupted(t *testing.T) { - // Test matrix - keyAlgs := []KeyAlgorithm{DIRECT, ECDH_ES, ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - zipAlgs := []CompressionAlgorithm{NONE, DEFLATE} - - serializers := []func(*JsonWebEncryption) (string, error){ - func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebEncryption) (string, error) { return obj.FullSerialize(), nil }, - } - - bitflip := func(slice []byte) bool { - if len(slice) > 0 { - slice[0] ^= 0xFF - return false - } - return true - } - - corrupters := []func(*JsonWebEncryption) bool{ - func(obj *JsonWebEncryption) bool { - // Set invalid ciphertext - return bitflip(obj.ciphertext) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid auth tag - return bitflip(obj.tag) - }, - func(obj *JsonWebEncryption) bool { - // Set invalid AAD - return bitflip(obj.aad) - }, - func(obj *JsonWebEncryption) bool { - // Mess with encrypted key - return bitflip(obj.recipients[0].encryptedKey) - }, - func(obj *JsonWebEncryption) bool { - // Mess with GCM-KW auth tag - return bitflip(obj.protected.Tag.bytes()) - }, - } - - // Note: can't use AAD with compact serialization - aads := [][]byte{ - nil, - []byte("Ut enim ad minim veniam"), - } - - // Test all different configurations - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for _, zip := range zipAlgs { - for i, serializer := range serializers { - for j, corrupter := range corrupters { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, aads[i], key.enc, key.dec) - if err == nil { - t.Error("failed to detect corrupt data", err, alg, enc, zip, i, j) - } - } - } - } - } - } - } -} - -func TestEncrypterWithJWKAndKeyID(t *testing.T) { - enc, err := NewEncrypter(A128KW, A128GCM, &JsonWebKey{ - KeyID: "test-id", - Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }) - if err != nil { - t.Error(err) - } - - ciphertext, _ := enc.Encrypt([]byte("Lorem ipsum dolor sit amet")) - - serialized1, _ := ciphertext.CompactSerialize() - serialized2 := ciphertext.FullSerialize() - - parsed1, _ := ParseEncrypted(serialized1) - parsed2, _ := ParseEncrypted(serialized2) - - if parsed1.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Header.KeyID) - } - if parsed2.Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Header.KeyID) - } -} - -func TestEncrypterWithBrokenRand(t *testing.T) { - keyAlgs := []KeyAlgorithm{ECDH_ES_A128KW, A128KW, RSA1_5, RSA_OAEP, RSA_OAEP_256, A128GCMKW} - encAlgs := []ContentEncryption{A128GCM, A192GCM, A256GCM, A128CBC_HS256, A192CBC_HS384, A256CBC_HS512} - - serializer := func(obj *JsonWebEncryption) (string, error) { return obj.CompactSerialize() } - corrupter := func(obj *JsonWebEncryption) bool { 
return false } - - // Break rand reader - readers := []func() io.Reader{ - // Totally broken - func() io.Reader { return bytes.NewReader([]byte{}) }, - // Not enough bytes - func() io.Reader { return io.LimitReader(rand.Reader, 20) }, - } - - defer resetRandReader() - - for _, alg := range keyAlgs { - for _, enc := range encAlgs { - for _, key := range generateTestKeys(alg, enc) { - for i, getReader := range readers { - randReader = getReader() - err := RoundtripJWE(alg, enc, NONE, serializer, corrupter, nil, key.enc, key.dec) - if err == nil { - t.Error("encrypter should fail if rand is broken", i) - } - } - } - } - } -} - -func TestNewEncrypterErrors(t *testing.T) { - _, err := NewEncrypter("XYZ", "XYZ", nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid cipher") - } - - _, err = NewMultiEncrypter("XYZ") - if err == nil { - t.Error("was able to instantiate multi-encrypter with invalid cipher") - } - - _, err = NewEncrypter(DIRECT, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid direct key") - } - - _, err = NewEncrypter(ECDH_ES, A128GCM, nil) - if err == nil { - t.Error("was able to instantiate encrypter with invalid EC key") - } -} - -func TestMultiRecipientJWE(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - err = enc.AddRecipient(RSA_OAEP, &rsaTestKey.PublicKey) - if err != nil { - t.Error("error when adding RSA recipient", err) - } - - sharedKey := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - err = enc.AddRecipient(A256GCMKW, sharedKey) - if err != nil { - t.Error("error when adding AES recipient: ", err) - return - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := enc.Encrypt(input) - if err != nil { - t.Error("error in encrypt: ", err) - return - } - - msg := obj.FullSerialize() - - parsed, err := ParseEncrypted(msg) - if err != nil { - t.Error("error in parse: ", err) - return - } - - output, err := parsed.Decrypt(rsaTestKey) - if err != nil { - t.Error("error on decrypt with RSA: ", err) - return - } - - if bytes.Compare(input, output) != 0 { - t.Error("Decrypted output does not match input: ", output, input) - return - } - - output, err = parsed.Decrypt(sharedKey) - if err != nil { - t.Error("error on decrypt with AES: ", err) - return - } - - if bytes.Compare(input, output) != 0 { - t.Error("Decrypted output does not match input", output, input) - return - } -} - -func TestMultiRecipientErrors(t *testing.T) { - enc, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - input := []byte("Lorem ipsum dolor sit amet") - _, err = enc.Encrypt(input) - if err == nil { - t.Error("should fail when encrypting to zero recipients") - } - - err = enc.AddRecipient(DIRECT, nil) - if err == nil { - t.Error("should reject DIRECT mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(ECDH_ES, nil) - if err == nil { - t.Error("should reject ECDH_ES mode when encrypting to multiple recipients") - } - - err = enc.AddRecipient(RSA1_5, nil) - if err == nil { - t.Error("should reject invalid recipient key") - } -} - -type testKey struct { - enc, dec interface{} -} - -func symmetricTestKey(size int) []testKey { - key, _, _ := randomKeyGenerator{size: size}.genKey() - - return []testKey{ - testKey{ - enc: key, - dec: key, - }, - testKey{ - enc: &JsonWebKey{KeyID: "test", Key: key}, - dec: &JsonWebKey{KeyID: "test", Key: key}, - }, - } -} - -func 
generateTestKeys(keyAlg KeyAlgorithm, encAlg ContentEncryption) []testKey { - switch keyAlg { - case DIRECT: - return symmetricTestKey(getContentCipher(encAlg).keySize()) - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return []testKey{ - testKey{ - dec: ecTestKey256, - enc: &ecTestKey256.PublicKey, - }, - testKey{ - dec: ecTestKey384, - enc: &ecTestKey384.PublicKey, - }, - testKey{ - dec: ecTestKey521, - enc: &ecTestKey521.PublicKey, - }, - testKey{ - dec: &JsonWebKey{KeyID: "test", Key: ecTestKey256}, - enc: &JsonWebKey{KeyID: "test", Key: &ecTestKey256.PublicKey}, - }, - } - case A128GCMKW, A128KW: - return symmetricTestKey(16) - case A192GCMKW, A192KW: - return symmetricTestKey(24) - case A256GCMKW, A256KW: - return symmetricTestKey(32) - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return []testKey{testKey{ - dec: rsaTestKey, - enc: &rsaTestKey.PublicKey, - }} - } - - panic("Must update test case") -} - -func RunRoundtripsJWE(b *testing.B, alg KeyAlgorithm, enc ContentEncryption, zip CompressionAlgorithm, priv, pub interface{}) { - serializer := func(obj *JsonWebEncryption) (string, error) { - return obj.CompactSerialize() - } - - corrupter := func(obj *JsonWebEncryption) bool { return false } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := RoundtripJWE(alg, enc, zip, serializer, corrupter, nil, pub, priv) - if err != nil { - b.Error(err) - } - } -} - -var ( - chunks = map[string][]byte{ - "1B": make([]byte, 1), - "64B": make([]byte, 64), - "1KB": make([]byte, 1024), - "64KB": make([]byte, 65536), - "1MB": make([]byte, 1048576), - "64MB": make([]byte, 67108864), - } - - symKey, _, _ = randomKeyGenerator{size: 32}.genKey() - - encrypters = map[string]Encrypter{ - "OAEPAndGCM": mustEncrypter(RSA_OAEP, A128GCM, &rsaTestKey.PublicKey), - "PKCSAndGCM": mustEncrypter(RSA1_5, A128GCM, &rsaTestKey.PublicKey), - "OAEPAndCBC": mustEncrypter(RSA_OAEP, A128CBC_HS256, &rsaTestKey.PublicKey), - "PKCSAndCBC": mustEncrypter(RSA1_5, A128CBC_HS256, &rsaTestKey.PublicKey), - "DirectGCM128": mustEncrypter(DIRECT, A128GCM, symKey), - "DirectCBC128": mustEncrypter(DIRECT, A128CBC_HS256, symKey), - "DirectGCM256": mustEncrypter(DIRECT, A256GCM, symKey), - "DirectCBC256": mustEncrypter(DIRECT, A256CBC_HS512, symKey), - "AESKWAndGCM128": mustEncrypter(A128KW, A128GCM, symKey), - "AESKWAndCBC256": mustEncrypter(A256KW, A256GCM, symKey), - "ECDHOnP256AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey256.PublicKey), - "ECDHOnP384AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey384.PublicKey), - "ECDHOnP521AndGCM128": mustEncrypter(ECDH_ES, A128GCM, &ecTestKey521.PublicKey), - } -) - -func BenchmarkEncrypt1BWithOAEPAndGCM(b *testing.B) { benchEncrypt("1B", "OAEPAndGCM", b) } -func BenchmarkEncrypt64BWithOAEPAndGCM(b *testing.B) { benchEncrypt("64B", "OAEPAndGCM", b) } -func BenchmarkEncrypt1KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64KBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkEncrypt1MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkEncrypt64MBWithOAEPAndGCM(b *testing.B) { benchEncrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkEncrypt1BWithPKCSAndGCM(b *testing.B) { benchEncrypt("1B", "PKCSAndGCM", b) } -func BenchmarkEncrypt64BWithPKCSAndGCM(b *testing.B) { benchEncrypt("64B", "PKCSAndGCM", b) } -func BenchmarkEncrypt1KBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64KBWithPKCSAndGCM(b *testing.B) { 
benchEncrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkEncrypt1MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkEncrypt64MBWithPKCSAndGCM(b *testing.B) { benchEncrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkEncrypt1BWithOAEPAndCBC(b *testing.B) { benchEncrypt("1B", "OAEPAndCBC", b) } -func BenchmarkEncrypt64BWithOAEPAndCBC(b *testing.B) { benchEncrypt("64B", "OAEPAndCBC", b) } -func BenchmarkEncrypt1KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64KBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkEncrypt1MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkEncrypt64MBWithOAEPAndCBC(b *testing.B) { benchEncrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkEncrypt1BWithPKCSAndCBC(b *testing.B) { benchEncrypt("1B", "PKCSAndCBC", b) } -func BenchmarkEncrypt64BWithPKCSAndCBC(b *testing.B) { benchEncrypt("64B", "PKCSAndCBC", b) } -func BenchmarkEncrypt1KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64KBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkEncrypt1MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkEncrypt64MBWithPKCSAndCBC(b *testing.B) { benchEncrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkEncrypt1BWithDirectGCM128(b *testing.B) { benchEncrypt("1B", "DirectGCM128", b) } -func BenchmarkEncrypt64BWithDirectGCM128(b *testing.B) { benchEncrypt("64B", "DirectGCM128", b) } -func BenchmarkEncrypt1KBWithDirectGCM128(b *testing.B) { benchEncrypt("1KB", "DirectGCM128", b) } -func BenchmarkEncrypt64KBWithDirectGCM128(b *testing.B) { benchEncrypt("64KB", "DirectGCM128", b) } -func BenchmarkEncrypt1MBWithDirectGCM128(b *testing.B) { benchEncrypt("1MB", "DirectGCM128", b) } -func BenchmarkEncrypt64MBWithDirectGCM128(b *testing.B) { benchEncrypt("64MB", "DirectGCM128", b) } - -func BenchmarkEncrypt1BWithDirectCBC128(b *testing.B) { benchEncrypt("1B", "DirectCBC128", b) } -func BenchmarkEncrypt64BWithDirectCBC128(b *testing.B) { benchEncrypt("64B", "DirectCBC128", b) } -func BenchmarkEncrypt1KBWithDirectCBC128(b *testing.B) { benchEncrypt("1KB", "DirectCBC128", b) } -func BenchmarkEncrypt64KBWithDirectCBC128(b *testing.B) { benchEncrypt("64KB", "DirectCBC128", b) } -func BenchmarkEncrypt1MBWithDirectCBC128(b *testing.B) { benchEncrypt("1MB", "DirectCBC128", b) } -func BenchmarkEncrypt64MBWithDirectCBC128(b *testing.B) { benchEncrypt("64MB", "DirectCBC128", b) } - -func BenchmarkEncrypt1BWithDirectGCM256(b *testing.B) { benchEncrypt("1B", "DirectGCM256", b) } -func BenchmarkEncrypt64BWithDirectGCM256(b *testing.B) { benchEncrypt("64B", "DirectGCM256", b) } -func BenchmarkEncrypt1KBWithDirectGCM256(b *testing.B) { benchEncrypt("1KB", "DirectGCM256", b) } -func BenchmarkEncrypt64KBWithDirectGCM256(b *testing.B) { benchEncrypt("64KB", "DirectGCM256", b) } -func BenchmarkEncrypt1MBWithDirectGCM256(b *testing.B) { benchEncrypt("1MB", "DirectGCM256", b) } -func BenchmarkEncrypt64MBWithDirectGCM256(b *testing.B) { benchEncrypt("64MB", "DirectGCM256", b) } - -func BenchmarkEncrypt1BWithDirectCBC256(b *testing.B) { benchEncrypt("1B", "DirectCBC256", b) } -func BenchmarkEncrypt64BWithDirectCBC256(b *testing.B) { benchEncrypt("64B", "DirectCBC256", b) } -func BenchmarkEncrypt1KBWithDirectCBC256(b *testing.B) { benchEncrypt("1KB", "DirectCBC256", b) } -func BenchmarkEncrypt64KBWithDirectCBC256(b *testing.B) { benchEncrypt("64KB", 
"DirectCBC256", b) } -func BenchmarkEncrypt1MBWithDirectCBC256(b *testing.B) { benchEncrypt("1MB", "DirectCBC256", b) } -func BenchmarkEncrypt64MBWithDirectCBC256(b *testing.B) { benchEncrypt("64MB", "DirectCBC256", b) } - -func BenchmarkEncrypt1BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64BWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64KBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt1MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkEncrypt64MBWithAESKWAndGCM128(b *testing.B) { benchEncrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkEncrypt1BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64BWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64KBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt1MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkEncrypt64MBWithAESKWAndCBC256(b *testing.B) { benchEncrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkEncrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchEncrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkEncrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkEncrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchEncrypt("64MB", 
"ECDHOnP521AndGCM128", b) -} - -func benchEncrypt(chunkKey, primKey string, b *testing.B) { - data, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - b.SetBytes(int64(len(data))) - for i := 0; i < b.N; i++ { - enc.Encrypt(data) - } -} - -var ( - decryptionKeys = map[string]interface{}{ - "OAEPAndGCM": rsaTestKey, - "PKCSAndGCM": rsaTestKey, - "OAEPAndCBC": rsaTestKey, - "PKCSAndCBC": rsaTestKey, - - "DirectGCM128": symKey, - "DirectCBC128": symKey, - "DirectGCM256": symKey, - "DirectCBC256": symKey, - - "AESKWAndGCM128": symKey, - "AESKWAndCBC256": symKey, - - "ECDHOnP256AndGCM128": ecTestKey256, - "ECDHOnP384AndGCM128": ecTestKey384, - "ECDHOnP521AndGCM128": ecTestKey521, - } -) - -func BenchmarkDecrypt1BWithOAEPAndGCM(b *testing.B) { benchDecrypt("1B", "OAEPAndGCM", b) } -func BenchmarkDecrypt64BWithOAEPAndGCM(b *testing.B) { benchDecrypt("64B", "OAEPAndGCM", b) } -func BenchmarkDecrypt1KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64KBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64KB", "OAEPAndGCM", b) } -func BenchmarkDecrypt1MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("1MB", "OAEPAndGCM", b) } -func BenchmarkDecrypt64MBWithOAEPAndGCM(b *testing.B) { benchDecrypt("64MB", "OAEPAndGCM", b) } - -func BenchmarkDecrypt1BWithPKCSAndGCM(b *testing.B) { benchDecrypt("1B", "PKCSAndGCM", b) } -func BenchmarkDecrypt64BWithPKCSAndGCM(b *testing.B) { benchDecrypt("64B", "PKCSAndGCM", b) } -func BenchmarkDecrypt1KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64KBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64KB", "PKCSAndGCM", b) } -func BenchmarkDecrypt1MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("1MB", "PKCSAndGCM", b) } -func BenchmarkDecrypt64MBWithPKCSAndGCM(b *testing.B) { benchDecrypt("64MB", "PKCSAndGCM", b) } - -func BenchmarkDecrypt1BWithOAEPAndCBC(b *testing.B) { benchDecrypt("1B", "OAEPAndCBC", b) } -func BenchmarkDecrypt64BWithOAEPAndCBC(b *testing.B) { benchDecrypt("64B", "OAEPAndCBC", b) } -func BenchmarkDecrypt1KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64KBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64KB", "OAEPAndCBC", b) } -func BenchmarkDecrypt1MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("1MB", "OAEPAndCBC", b) } -func BenchmarkDecrypt64MBWithOAEPAndCBC(b *testing.B) { benchDecrypt("64MB", "OAEPAndCBC", b) } - -func BenchmarkDecrypt1BWithPKCSAndCBC(b *testing.B) { benchDecrypt("1B", "PKCSAndCBC", b) } -func BenchmarkDecrypt64BWithPKCSAndCBC(b *testing.B) { benchDecrypt("64B", "PKCSAndCBC", b) } -func BenchmarkDecrypt1KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64KBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64KB", "PKCSAndCBC", b) } -func BenchmarkDecrypt1MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("1MB", "PKCSAndCBC", b) } -func BenchmarkDecrypt64MBWithPKCSAndCBC(b *testing.B) { benchDecrypt("64MB", "PKCSAndCBC", b) } - -func BenchmarkDecrypt1BWithDirectGCM128(b *testing.B) { benchDecrypt("1B", "DirectGCM128", b) } -func BenchmarkDecrypt64BWithDirectGCM128(b *testing.B) { benchDecrypt("64B", "DirectGCM128", b) } -func BenchmarkDecrypt1KBWithDirectGCM128(b *testing.B) { benchDecrypt("1KB", "DirectGCM128", b) } -func BenchmarkDecrypt64KBWithDirectGCM128(b *testing.B) { benchDecrypt("64KB", "DirectGCM128", b) } -func 
BenchmarkDecrypt1MBWithDirectGCM128(b *testing.B) { benchDecrypt("1MB", "DirectGCM128", b) } -func BenchmarkDecrypt64MBWithDirectGCM128(b *testing.B) { benchDecrypt("64MB", "DirectGCM128", b) } - -func BenchmarkDecrypt1BWithDirectCBC128(b *testing.B) { benchDecrypt("1B", "DirectCBC128", b) } -func BenchmarkDecrypt64BWithDirectCBC128(b *testing.B) { benchDecrypt("64B", "DirectCBC128", b) } -func BenchmarkDecrypt1KBWithDirectCBC128(b *testing.B) { benchDecrypt("1KB", "DirectCBC128", b) } -func BenchmarkDecrypt64KBWithDirectCBC128(b *testing.B) { benchDecrypt("64KB", "DirectCBC128", b) } -func BenchmarkDecrypt1MBWithDirectCBC128(b *testing.B) { benchDecrypt("1MB", "DirectCBC128", b) } -func BenchmarkDecrypt64MBWithDirectCBC128(b *testing.B) { benchDecrypt("64MB", "DirectCBC128", b) } - -func BenchmarkDecrypt1BWithDirectGCM256(b *testing.B) { benchDecrypt("1B", "DirectGCM256", b) } -func BenchmarkDecrypt64BWithDirectGCM256(b *testing.B) { benchDecrypt("64B", "DirectGCM256", b) } -func BenchmarkDecrypt1KBWithDirectGCM256(b *testing.B) { benchDecrypt("1KB", "DirectGCM256", b) } -func BenchmarkDecrypt64KBWithDirectGCM256(b *testing.B) { benchDecrypt("64KB", "DirectGCM256", b) } -func BenchmarkDecrypt1MBWithDirectGCM256(b *testing.B) { benchDecrypt("1MB", "DirectGCM256", b) } -func BenchmarkDecrypt64MBWithDirectGCM256(b *testing.B) { benchDecrypt("64MB", "DirectGCM256", b) } - -func BenchmarkDecrypt1BWithDirectCBC256(b *testing.B) { benchDecrypt("1B", "DirectCBC256", b) } -func BenchmarkDecrypt64BWithDirectCBC256(b *testing.B) { benchDecrypt("64B", "DirectCBC256", b) } -func BenchmarkDecrypt1KBWithDirectCBC256(b *testing.B) { benchDecrypt("1KB", "DirectCBC256", b) } -func BenchmarkDecrypt64KBWithDirectCBC256(b *testing.B) { benchDecrypt("64KB", "DirectCBC256", b) } -func BenchmarkDecrypt1MBWithDirectCBC256(b *testing.B) { benchDecrypt("1MB", "DirectCBC256", b) } -func BenchmarkDecrypt64MBWithDirectCBC256(b *testing.B) { benchDecrypt("64MB", "DirectCBC256", b) } - -func BenchmarkDecrypt1BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64BWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64B", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64KBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64KB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt1MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("1MB", "AESKWAndGCM128", b) } -func BenchmarkDecrypt64MBWithAESKWAndGCM128(b *testing.B) { benchDecrypt("64MB", "AESKWAndGCM128", b) } - -func BenchmarkDecrypt1BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64BWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64B", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64KBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64KB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt1MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("1MB", "AESKWAndCBC256", b) } -func BenchmarkDecrypt64MBWithAESKWAndCBC256(b *testing.B) { benchDecrypt("64MB", "AESKWAndCBC256", b) } - -func BenchmarkDecrypt1BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1KB", 
"ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP256AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP256AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP256AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP384AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP384AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP384AndGCM128", b) -} - -func BenchmarkDecrypt1BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64BWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64B", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64KBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64KB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt1MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("1MB", "ECDHOnP521AndGCM128", b) -} -func BenchmarkDecrypt64MBWithECDHOnP521AndGCM128(b *testing.B) { - benchDecrypt("64MB", "ECDHOnP521AndGCM128", b) -} - -func benchDecrypt(chunkKey, primKey string, b *testing.B) { - chunk, ok := chunks[chunkKey] - if !ok { - b.Fatalf("unknown chunk size %s", chunkKey) - } - - enc, ok := encrypters[primKey] - if !ok { - b.Fatalf("unknown encrypter %s", primKey) - } - - dec, ok := decryptionKeys[primKey] - if !ok { - b.Fatalf("unknown decryption key %s", primKey) - } - - data, err := enc.Encrypt(chunk) - if err != nil { - b.Fatal(err) - } - - b.SetBytes(int64(len(chunk))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - data.Decrypt(dec) - } -} - -func mustEncrypter(keyAlg KeyAlgorithm, encAlg ContentEncryption, encryptionKey interface{}) Encrypter { - enc, err := NewEncrypter(keyAlg, encAlg, encryptionKey) - if err != nil { - panic(err) - } - return enc -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go deleted file mode 100644 index 50468295..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/doc_test.go +++ /dev/null @@ -1,226 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -// Dummy encrypter for use in examples -var encrypter, _ = NewEncrypter(DIRECT, A128GCM, []byte{}) - -func Example_jWE() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would - // indicate that the selected algorithm(s) are not currently supported. - publicKey := &privateKey.PublicKey - encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey) - if err != nil { - panic(err) - } - - // Encrypt a sample plaintext. Calling the encrypter returns an encrypted - // JWE object, which can then be serialized for output afterwards. An error - // would indicate a problem in an underlying cryptographic primitive. - var plaintext = []byte("Lorem ipsum dolor sit amet") - object, err := encrypter.Encrypt(plaintext) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, encrypted JWE object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseEncrypted(serialized) - if err != nil { - panic(err) - } - - // Now we can decrypt and get back our original plaintext. An error here - // would indicate the the message failed to decrypt, e.g. because the auth - // tag was broken or the message was tampered with. - decrypted, err := object.Decrypt(privateKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(decrypted)) - // output: Lorem ipsum dolor sit amet -} - -func Example_jWS() { - // Generate a public/private key pair to use for this example. The library - // also provides two utility functions (LoadPublicKey and LoadPrivateKey) - // that can be used to load keys from PEM/DER-encoded data. - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - - // Instantiate a signer using RSASSA-PSS (SHA512) with the given private key. - signer, err := NewSigner(PS512, privateKey) - if err != nil { - panic(err) - } - - // Sign a sample payload. Calling the signer returns a protected JWS object, - // which can then be serialized for output afterwards. An error would - // indicate a problem in an underlying cryptographic primitive. - var payload = []byte("Lorem ipsum dolor sit amet") - object, err := signer.Sign(payload) - if err != nil { - panic(err) - } - - // Serialize the encrypted object using the full serialization format. - // Alternatively you can also use the compact format here by calling - // object.CompactSerialize() instead. - serialized := object.FullSerialize() - - // Parse the serialized, protected JWS object. An error would indicate that - // the given input did not represent a valid message. - object, err = ParseSigned(serialized) - if err != nil { - panic(err) - } - - // Now we can verify the signature on the payload. An error here would - // indicate the the message failed to verify, e.g. because the signature was - // broken or the message was tampered with. 
- output, err := object.Verify(&privateKey.PublicKey) - if err != nil { - panic(err) - } - - fmt.Printf(string(output)) - // output: Lorem ipsum dolor sit amet -} - -func ExampleNewEncrypter_publicKey() { - var publicKey *rsa.PublicKey - - // Instantiate an encrypter using RSA-OAEP with AES128-GCM. - NewEncrypter(RSA_OAEP, A128GCM, publicKey) - - // Instantiate an encrypter using RSA-PKCS1v1.5 with AES128-CBC+HMAC. - NewEncrypter(RSA1_5, A128CBC_HS256, publicKey) -} - -func ExampleNewEncrypter_symmetric() { - var sharedKey []byte - - // Instantiate an encrypter using AES128-GCM with AES-GCM key wrap. - NewEncrypter(A128GCMKW, A128GCM, sharedKey) - - // Instantiate an encrypter using AES256-GCM directly, w/o key wrapping. - NewEncrypter(DIRECT, A256GCM, sharedKey) -} - -func ExampleNewSigner_publicKey() { - var rsaPrivateKey *rsa.PrivateKey - var ecdsaPrivateKey *ecdsa.PrivateKey - - // Instantiate a signer using RSA-PKCS#1v1.5 with SHA-256. - NewSigner(RS256, rsaPrivateKey) - - // Instantiate a signer using ECDSA with SHA-384. - NewSigner(ES384, ecdsaPrivateKey) -} - -func ExampleNewSigner_symmetric() { - var sharedKey []byte - - // Instantiate an signer using HMAC-SHA256. - NewSigner(HS256, sharedKey) - - // Instantiate an signer using HMAC-SHA512. - NewSigner(HS512, sharedKey) -} - -func ExampleNewMultiEncrypter() { - var publicKey *rsa.PublicKey - var sharedKey []byte - - // Instantiate an encrypter using AES-GCM. - encrypter, err := NewMultiEncrypter(A128GCM) - if err != nil { - panic(err) - } - - // Add a recipient using a shared key with AES-GCM key wap - err = encrypter.AddRecipient(A128GCMKW, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA public key with RSA-OAEP - err = encrypter.AddRecipient(RSA_OAEP, publicKey) - if err != nil { - panic(err) - } -} - -func ExampleNewMultiSigner() { - var privateKey *rsa.PrivateKey - var sharedKey []byte - - // Instantiate a signer for multiple recipients. - signer := NewMultiSigner() - - // Add a recipient using a shared key with HMAC-SHA256 - err := signer.AddRecipient(HS256, sharedKey) - if err != nil { - panic(err) - } - - // Add a recipient using an RSA private key with RSASSA-PSS with SHA384 - err = signer.AddRecipient(PS384, privateKey) - if err != nil { - panic(err) - } -} - -func ExampleEncrypter_encrypt() { - // Encrypt a plaintext in order to get an encrypted JWE object. - var plaintext = []byte("This is a secret message") - - encrypter.Encrypt(plaintext) -} - -func ExampleEncrypter_encryptWithAuthData() { - // Encrypt a plaintext in order to get an encrypted JWE object. Also attach - // some additional authenticated data (AAD) to the object. Note that objects - // with attached AAD can only be represented using full serialization. - var plaintext = []byte("This is a secret message") - var aad = []byte("This is authenticated, but public data") - - encrypter.EncryptWithAuthData(plaintext, aad) -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go deleted file mode 100644 index e2f8d979..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/encoding_test.go +++ /dev/null @@ -1,173 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "strings" - "testing" -) - -func TestBase64URLEncode(t *testing.T) { - // Test arrays with various sizes - if base64URLEncode([]byte{}) != "" { - t.Error("failed to encode empty array") - } - - if base64URLEncode([]byte{0}) != "AA" { - t.Error("failed to encode [0x00]") - } - - if base64URLEncode([]byte{0, 1}) != "AAE" { - t.Error("failed to encode [0x00, 0x01]") - } - - if base64URLEncode([]byte{0, 1, 2}) != "AAEC" { - t.Error("failed to encode [0x00, 0x01, 0x02]") - } - - if base64URLEncode([]byte{0, 1, 2, 3}) != "AAECAw" { - t.Error("failed to encode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestBase64URLDecode(t *testing.T) { - // Test arrays with various sizes - val, err := base64URLDecode("") - if err != nil || !bytes.Equal(val, []byte{}) { - t.Error("failed to decode empty array") - } - - val, err = base64URLDecode("AA") - if err != nil || !bytes.Equal(val, []byte{0}) { - t.Error("failed to decode [0x00]") - } - - val, err = base64URLDecode("AAE") - if err != nil || !bytes.Equal(val, []byte{0, 1}) { - t.Error("failed to decode [0x00, 0x01]") - } - - val, err = base64URLDecode("AAEC") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2}) { - t.Error("failed to decode [0x00, 0x01, 0x02]") - } - - val, err = base64URLDecode("AAECAw") - if err != nil || !bytes.Equal(val, []byte{0, 1, 2, 3}) { - t.Error("failed to decode [0x00, 0x01, 0x02, 0x03]") - } -} - -func TestDeflateRoundtrip(t *testing.T) { - original := []byte("Lorem ipsum dolor sit amet") - - compressed, err := deflate(original) - if err != nil { - panic(err) - } - - output, err := inflate(compressed) - if err != nil { - panic(err) - } - - if bytes.Compare(output, original) != 0 { - t.Error("Input and output do not match") - } -} - -func TestInvalidCompression(t *testing.T) { - _, err := compress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = decompress(DEFLATE, []byte{1, 2, 3, 4}) - if err == nil { - t.Error("should not accept invalid data") - } -} - -func TestByteBufferTrim(t *testing.T) { - buf := newBufferFromInt(1) - if !bytes.Equal(buf.data, []byte{1}) { - t.Error("Byte buffer for integer '1' should contain [0x01]") - } - - buf = newBufferFromInt(65537) - if !bytes.Equal(buf.data, []byte{1, 0, 1}) { - t.Error("Byte buffer for integer '65537' should contain [0x01, 0x00, 0x01]") - } -} - -func TestFixedSizeBuffer(t *testing.T) { - data0 := []byte{} - data1 := []byte{1} - data2 := []byte{1, 2} - data3 := []byte{1, 2, 3} - data4 := []byte{1, 2, 3, 4} - - buf0 := newFixedSizeBuffer(data0, 4) - buf1 := newFixedSizeBuffer(data1, 4) - buf2 := newFixedSizeBuffer(data2, 4) - buf3 := newFixedSizeBuffer(data3, 4) - buf4 := newFixedSizeBuffer(data4, 4) - - if !bytes.Equal(buf0.data, []byte{0, 0, 0, 0}) { - t.Error("Invalid padded buffer for buf0") - } - if !bytes.Equal(buf1.data, []byte{0, 0, 0, 1}) { - t.Error("Invalid padded buffer for buf1") - } - if !bytes.Equal(buf2.data, []byte{0, 0, 1, 2}) { - 
t.Error("Invalid padded buffer for buf2") - } - if !bytes.Equal(buf3.data, []byte{0, 1, 2, 3}) { - t.Error("Invalid padded buffer for buf3") - } - if !bytes.Equal(buf4.data, []byte{1, 2, 3, 4}) { - t.Error("Invalid padded buffer for buf4") - } -} - -func TestSerializeJSONRejectsNil(t *testing.T) { - defer func() { - r := recover() - if r == nil || !strings.Contains(r.(string), "nil pointer") { - t.Error("serialize function should not accept nil pointer") - } - }() - - mustSerializeJSON(nil) -} - -func TestFixedSizeBufferTooLarge(t *testing.T) { - defer func() { - r := recover() - if r == nil { - t.Error("should not be able to create fixed size buffer with oversized data") - } - }() - - newFixedSizeBuffer(make([]byte, 2), 1) -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md deleted file mode 100644 index 86de5e55..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Safe JSON - -This repository contains a fork of the `encoding/json` package from Go 1.6. - -The following changes were made: - -* Object deserialization uses case-sensitive member name matching instead of - [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). - This is to avoid differences in the interpretation of JOSE messages between - go-jose and libraries written in other languages. -* When deserializing a JSON object, we check for duplicate keys and reject the - input whenever we detect a duplicate. Rather than trying to work with malformed - data, we prefer to reject it right away. diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go deleted file mode 100644 index ed89d115..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/bench_test.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Large data benchmark. -// The JSON data is a summary of agl's changes in the -// go, webkit, and chromium open source projects. -// We benchmark converting between the JSON form -// and in-memory data structures. 
- -package json - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "os" - "strings" - "testing" -) - -type codeResponse struct { - Tree *codeNode `json:"tree"` - Username string `json:"username"` -} - -type codeNode struct { - Name string `json:"name"` - Kids []*codeNode `json:"kids"` - CLWeight float64 `json:"cl_weight"` - Touches int `json:"touches"` - MinT int64 `json:"min_t"` - MaxT int64 `json:"max_t"` - MeanT int64 `json:"mean_t"` -} - -var codeJSON []byte -var codeStruct codeResponse - -func codeInit() { - f, err := os.Open("testdata/code.json.gz") - if err != nil { - panic(err) - } - defer f.Close() - gz, err := gzip.NewReader(f) - if err != nil { - panic(err) - } - data, err := ioutil.ReadAll(gz) - if err != nil { - panic(err) - } - - codeJSON = data - - if err := Unmarshal(codeJSON, &codeStruct); err != nil { - panic("unmarshal code.json: " + err.Error()) - } - - if data, err = Marshal(&codeStruct); err != nil { - panic("marshal code.json: " + err.Error()) - } - - if !bytes.Equal(data, codeJSON) { - println("different lengths", len(data), len(codeJSON)) - for i := 0; i < len(data) && i < len(codeJSON); i++ { - if data[i] != codeJSON[i] { - println("re-marshal: changed at byte", i) - println("orig: ", string(codeJSON[i-10:i+10])) - println("new: ", string(data[i-10:i+10])) - break - } - } - panic("re-marshal code.json: different result") - } -} - -func BenchmarkCodeEncoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - enc := NewEncoder(ioutil.Discard) - for i := 0; i < b.N; i++ { - if err := enc.Encode(&codeStruct); err != nil { - b.Fatal("Encode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeMarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - if _, err := Marshal(&codeStruct); err != nil { - b.Fatal("Marshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeDecoder(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var buf bytes.Buffer - dec := NewDecoder(&buf) - var r codeResponse - for i := 0; i < b.N; i++ { - buf.Write(codeJSON) - // hide EOF - buf.WriteByte('\n') - buf.WriteByte('\n') - buf.WriteByte('\n') - if err := dec.Decode(&r); err != nil { - b.Fatal("Decode:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkDecoderStream(b *testing.B) { - b.StopTimer() - var buf bytes.Buffer - dec := NewDecoder(&buf) - buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") - var x interface{} - if err := dec.Decode(&x); err != nil { - b.Fatal("Decode:", err) - } - ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" - b.StartTimer() - for i := 0; i < b.N; i++ { - if i%300000 == 0 { - buf.WriteString(ones) - } - x = nil - if err := dec.Decode(&x); err != nil || x != 1.0 { - b.Fatalf("Decode: %v after %d", err, i) - } - } -} - -func BenchmarkCodeUnmarshal(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - for i := 0; i < b.N; i++ { - var r codeResponse - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } - b.SetBytes(int64(len(codeJSON))) -} - -func BenchmarkCodeUnmarshalReuse(b *testing.B) { - if codeJSON == nil { - b.StopTimer() - codeInit() - b.StartTimer() - } - var r codeResponse - for i := 0; i < b.N; i++ { - if err := Unmarshal(codeJSON, &r); err != nil { - b.Fatal("Unmmarshal:", err) - } - } -} - -func BenchmarkUnmarshalString(b *testing.B) { - data := 
[]byte(`"hello, world"`) - var s string - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &s); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalFloat64(b *testing.B) { - var f float64 - data := []byte(`3.14`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &f); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkUnmarshalInt64(b *testing.B) { - var x int64 - data := []byte(`3`) - - for i := 0; i < b.N; i++ { - if err := Unmarshal(data, &x); err != nil { - b.Fatal("Unmarshal:", err) - } - } -} - -func BenchmarkIssue10335(b *testing.B) { - b.ReportAllocs() - var s struct{} - j := []byte(`{"a":{ }}`) - for n := 0; n < b.N; n++ { - if err := Unmarshal(j, &s); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go deleted file mode 100644 index 7577b21a..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/decode_test.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "encoding" - "fmt" - "image" - "net" - "reflect" - "strings" - "testing" - "time" -) - -type T struct { - X string - Y int - Z int `json:"-"` -} - -type U struct { - Alphabet string `json:"alpha"` -} - -type V struct { - F1 interface{} - F2 int32 - F3 Number -} - -// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and -// without UseNumber -var ifaceNumAsFloat64 = map[string]interface{}{ - "k1": float64(1), - "k2": "s", - "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, - "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, -} - -var ifaceNumAsNumber = map[string]interface{}{ - "k1": Number("1"), - "k2": "s", - "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, - "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, -} - -type tx struct { - x int -} - -// A type that can unmarshal itself. - -type unmarshaler struct { - T bool -} - -func (u *unmarshaler) UnmarshalJSON(b []byte) error { - *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. - return nil -} - -type ustruct struct { - M unmarshaler -} - -type unmarshalerText struct { - T bool -} - -// needed for re-marshaling tests -func (u *unmarshalerText) MarshalText() ([]byte, error) { - return []byte(""), nil -} - -func (u *unmarshalerText) UnmarshalText(b []byte) error { - *u = unmarshalerText{true} // All we need to see that UnmarshalText is called. - return nil -} - -var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) - -type ustructText struct { - M unmarshalerText -} - -var ( - um0, um1 unmarshaler // target2 of unmarshaling - ump = &um1 - umtrue = unmarshaler{true} - umslice = []unmarshaler{{true}} - umslicep = new([]unmarshaler) - umstruct = ustruct{unmarshaler{true}} - - um0T, um1T unmarshalerText // target2 of unmarshaling - umpT = &um1T - umtrueT = unmarshalerText{true} - umsliceT = []unmarshalerText{{true}} - umslicepT = new([]unmarshalerText) - umstructT = ustructText{unmarshalerText{true}} -) - -// Test data structures for anonymous fields. 
- -type Point struct { - Z int -} - -type Top struct { - Level0 int - Embed0 - *Embed0a - *Embed0b `json:"e,omitempty"` // treated as named - Embed0c `json:"-"` // ignored - Loop - Embed0p // has Point with X, Y, used - Embed0q // has Point with Z, used - embed // contains exported field -} - -type Embed0 struct { - Level1a int // overridden by Embed0a's Level1a with json tag - Level1b int // used because Embed0a's Level1b is renamed - Level1c int // used because Embed0a's Level1c is ignored - Level1d int // annihilated by Embed0a's Level1d - Level1e int `json:"x"` // annihilated by Embed0a.Level1e -} - -type Embed0a struct { - Level1a int `json:"Level1a,omitempty"` - Level1b int `json:"LEVEL1B,omitempty"` - Level1c int `json:"-"` - Level1d int // annihilated by Embed0's Level1d - Level1f int `json:"x"` // annihilated by Embed0's Level1e -} - -type Embed0b Embed0 - -type Embed0c Embed0 - -type Embed0p struct { - image.Point -} - -type Embed0q struct { - Point -} - -type embed struct { - Q int -} - -type Loop struct { - Loop1 int `json:",omitempty"` - Loop2 int `json:",omitempty"` - *Loop -} - -// From reflect test: -// The X in S6 and S7 annihilate, but they also block the X in S8.S9. -type S5 struct { - S6 - S7 - S8 -} - -type S6 struct { - X int -} - -type S7 S6 - -type S8 struct { - S9 -} - -type S9 struct { - X int - Y int -} - -// From reflect test: -// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. -type S10 struct { - S11 - S12 - S13 -} - -type S11 struct { - S6 -} - -type S12 struct { - S6 -} - -type S13 struct { - S8 -} - -type unmarshalTest struct { - in string - ptr interface{} - out interface{} - err error - useNumber bool -} - -type XYZ struct { - X interface{} - Y interface{} - Z interface{} -} - -func sliceAddr(x []int) *[]int { return &x } -func mapAddr(x map[string]int) *map[string]int { return &x } - -var unmarshalTests = []unmarshalTest{ - // basic types - {in: `true`, ptr: new(bool), out: true}, - {in: `1`, ptr: new(int), out: 1}, - {in: `1.2`, ptr: new(float64), out: 1.2}, - {in: `-5`, ptr: new(int16), out: int16(-5)}, - {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, - {in: `2`, ptr: new(Number), out: Number("2")}, - {in: `2`, ptr: new(interface{}), out: float64(2.0)}, - {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, - {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, - {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, - {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, - {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, - {in: "null", ptr: new(interface{}), out: nil}, - {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, - {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, - {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, - {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, - - // raw values with whitespace - {in: "\n true ", ptr: new(bool), out: true}, - {in: "\t 1 ", ptr: new(int), out: 1}, - {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, - {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, - {in: "\t 
\"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, - - // Z has a "-" tag. - {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, - - {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, - {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, - - // syntax errors - {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, - {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, - {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, - - // raw value errors - {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, - {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, - {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, - {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, - {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, - - // array tests - {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, - {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, - {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, - - // empty array to interface test - {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, - {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, - {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, - {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, - - // composite tests - {in: allValueIndent, ptr: new(All), out: allValue}, - {in: allValueCompact, ptr: new(All), out: allValue}, - {in: allValueIndent, ptr: new(*All), out: &allValue}, - {in: allValueCompact, ptr: new(*All), out: &allValue}, - {in: pallValueIndent, ptr: new(All), out: pallValue}, - {in: pallValueCompact, ptr: new(All), out: pallValue}, - {in: pallValueIndent, ptr: new(*All), out: &pallValue}, - {in: pallValueCompact, ptr: new(*All), out: &pallValue}, - - // unmarshal interface test - {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called - {in: `{"T":false}`, ptr: &ump, out: &umtrue}, - {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, - {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, - {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct}, - - // UnmarshalText interface test - {in: `"X"`, ptr: &um0T, out: umtrueT}, // use "false" so test will fail if custom unmarshaler is not called - {in: `"X"`, ptr: &umpT, out: &umtrueT}, - {in: `["X"]`, ptr: &umsliceT, out: umsliceT}, - {in: `["X"]`, ptr: &umslicepT, out: &umsliceT}, - {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT}, - - // Overwriting of data. - // This is different from package xml, but it's what we've always done. - // Now documented and tested. 
- {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, - {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, - - { - in: `{ - "Level0": 1, - "Level1b": 2, - "Level1c": 3, - "x": 4, - "Level1a": 5, - "LEVEL1B": 6, - "e": { - "Level1a": 8, - "Level1b": 9, - "Level1c": 10, - "Level1d": 11, - "x": 12 - }, - "Loop1": 13, - "Loop2": 14, - "X": 15, - "Y": 16, - "Z": 17, - "Q": 18 - }`, - ptr: new(Top), - out: Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - }, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S5), - out: S5{S8: S8{S9: S9{Y: 2}}}, - }, - { - in: `{"X": 1,"Y":2}`, - ptr: new(S10), - out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, - }, - - // invalid UTF-8 is coerced to valid UTF-8. - { - in: "\"hello\xffworld\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\xc2\xc2world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xc2\xffworld\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800world\"", - ptr: new(string), - out: "hello\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\\ud800\\ud800world\"", - ptr: new(string), - out: "hello\ufffd\ufffdworld", - }, - { - in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", - ptr: new(string), - out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", - }, - - // issue 8305 - { - in: `{"2009-11-10T23:00:00Z": "hello world"}`, - ptr: &map[time.Time]string{}, - err: &UnmarshalTypeError{"object", reflect.TypeOf(map[time.Time]string{}), 1}, - }, -} - -func TestMarshal(t *testing.T) { - b, err := Marshal(allValue) - if err != nil { - t.Fatalf("Marshal allValue: %v", err) - } - if string(b) != allValueCompact { - t.Errorf("Marshal allValueCompact") - diff(t, b, []byte(allValueCompact)) - return - } - - b, err = Marshal(pallValue) - if err != nil { - t.Fatalf("Marshal pallValue: %v", err) - } - if string(b) != pallValueCompact { - t.Errorf("Marshal pallValueCompact") - diff(t, b, []byte(pallValueCompact)) - return - } -} - -var badUTF8 = []struct { - in, out string -}{ - {"hello\xffworld", `"hello\ufffdworld"`}, - {"", `""`}, - {"\xff", `"\ufffd"`}, - {"\xff\xff", `"\ufffd\ufffd"`}, - {"a\xffb", `"a\ufffdb"`}, - {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, -} - -func TestMarshalBadUTF8(t *testing.T) { - for _, tt := range badUTF8 { - b, err := Marshal(tt.in) - if string(b) != tt.out || err != nil { - t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) - } - } -} - -func TestMarshalNumberZeroVal(t *testing.T) { - var n Number - out, err := Marshal(n) - if err != nil { - t.Fatal(err) - } - outStr := string(out) - if outStr != "0" { - t.Fatalf("Invalid zero val for Number: %q", outStr) - } -} - -func TestMarshalEmbeds(t *testing.T) { - top := &Top{ - Level0: 1, - Embed0: Embed0{ - Level1b: 2, - Level1c: 3, - }, - Embed0a: &Embed0a{ - Level1a: 5, - Level1b: 6, - }, - Embed0b: &Embed0b{ - Level1a: 8, - Level1b: 9, - Level1c: 10, - Level1d: 11, - Level1e: 12, - }, - Loop: Loop{ - Loop1: 13, - Loop2: 14, - }, - Embed0p: Embed0p{ - Point: image.Point{X: 15, Y: 
16}, - }, - Embed0q: Embed0q{ - Point: Point{Z: 17}, - }, - embed: embed{ - Q: 18, - }, - } - b, err := Marshal(top) - if err != nil { - t.Fatal(err) - } - want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" - if string(b) != want { - t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) - } -} - -func TestUnmarshal(t *testing.T) { - for i, tt := range unmarshalTests { - var scan scanner - in := []byte(tt.in) - if err := checkValid(in, &scan); err != nil { - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: checkValid: %#v", i, err) - continue - } - } - if tt.ptr == nil { - continue - } - - // v = new(right-type) - v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec := NewDecoder(bytes.NewReader(in)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: %v, want %v", i, err, tt.err) - continue - } else if err != nil { - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) - data, _ := Marshal(v.Elem().Interface()) - println(string(data)) - data, _ = Marshal(tt.out) - println(string(data)) - continue - } - - // Check round trip. - if tt.err == nil { - enc, err := Marshal(v.Interface()) - if err != nil { - t.Errorf("#%d: error re-marshaling: %v", i, err) - continue - } - vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) - dec = NewDecoder(bytes.NewReader(enc)) - if tt.useNumber { - dec.UseNumber() - } - if err := dec.Decode(vv.Interface()); err != nil { - t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) - continue - } - if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { - t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) - t.Errorf(" In: %q", strings.Map(noSpace, string(in))) - t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) - continue - } - } - } -} - -func TestUnmarshalMarshal(t *testing.T) { - initBig() - var v interface{} - if err := Unmarshal(jsonBig, &v); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - b, err := Marshal(v) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if !bytes.Equal(jsonBig, b) { - t.Errorf("Marshal jsonBig") - diff(t, b, jsonBig) - return - } -} - -var numberTests = []struct { - in string - i int64 - intErr string - f float64 - floatErr string -}{ - {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, - {in: "-12", i: -12, f: -12.0}, - {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, -} - -// Independent of Decode, basic coverage of the accessors in Number -func TestNumberAccessors(t *testing.T) { - for _, tt := range numberTests { - n := Number(tt.in) - if s := n.String(); s != tt.in { - t.Errorf("Number(%q).String() is %q", tt.in, s) - } - if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { - t.Errorf("Number(%q).Int64() is %d", tt.in, i) - } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { - t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err) - } - if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { - t.Errorf("Number(%q).Float64() is %g", tt.in, f) - } else if (err == 
nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { - t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) - } - } -} - -func TestLargeByteSlice(t *testing.T) { - s0 := make([]byte, 2000) - for i := range s0 { - s0[i] = byte(i) - } - b, err := Marshal(s0) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - var s1 []byte - if err := Unmarshal(b, &s1); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !bytes.Equal(s0, s1) { - t.Errorf("Marshal large byte slice") - diff(t, s0, s1) - } -} - -type Xint struct { - X int -} - -func TestUnmarshalInterface(t *testing.T) { - var xint Xint - var i interface{} = &xint - if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestUnmarshalPtrPtr(t *testing.T) { - var xint Xint - pxint := &xint - if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if xint.X != 1 { - t.Fatalf("Did not write to xint") - } -} - -func TestEscape(t *testing.T) { - const input = `"foobar"` + " [\u2028 \u2029]" - const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` - b, err := Marshal(input) - if err != nil { - t.Fatalf("Marshal error: %v", err) - } - if s := string(b); s != expected { - t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) - } -} - -// WrongString is a struct that's misusing the ,string modifier. -type WrongString struct { - Message string `json:"result,string"` -} - -type wrongStringTest struct { - in, err string -} - -var wrongStringTests = []wrongStringTest{ - {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, - {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, - {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, - {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, -} - -// If people misuse the ,string modifier, the error message should be -// helpful, telling the user that they're doing it wrong. -func TestErrorMessageFromMisusedString(t *testing.T) { - for n, tt := range wrongStringTests { - r := strings.NewReader(tt.in) - var s WrongString - err := NewDecoder(r).Decode(&s) - got := fmt.Sprintf("%v", err) - if got != tt.err { - t.Errorf("%d. 
got err = %q, want %q", n, got, tt.err) - } - } -} - -func noSpace(c rune) rune { - if isSpace(byte(c)) { //only used for ascii - return -1 - } - return c -} - -type All struct { - Bool bool - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - Uintptr uintptr - Float32 float32 - Float64 float64 - - Foo string `json:"bar"` - Foo2 string `json:"bar2,dummyopt"` - - IntStr int64 `json:",string"` - - PBool *bool - PInt *int - PInt8 *int8 - PInt16 *int16 - PInt32 *int32 - PInt64 *int64 - PUint *uint - PUint8 *uint8 - PUint16 *uint16 - PUint32 *uint32 - PUint64 *uint64 - PUintptr *uintptr - PFloat32 *float32 - PFloat64 *float64 - - String string - PString *string - - Map map[string]Small - MapP map[string]*Small - PMap *map[string]Small - PMapP *map[string]*Small - - EmptyMap map[string]Small - NilMap map[string]Small - - Slice []Small - SliceP []*Small - PSlice *[]Small - PSliceP *[]*Small - - EmptySlice []Small - NilSlice []Small - - StringSlice []string - ByteSlice []byte - - Small Small - PSmall *Small - PPSmall **Small - - Interface interface{} - PInterface *interface{} - - unexported int -} - -type Small struct { - Tag string -} - -var allValue = All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Uintptr: 12, - Float32: 14.1, - Float64: 15.1, - Foo: "foo", - Foo2: "foo2", - IntStr: 42, - String: "16", - Map: map[string]Small{ - "17": {Tag: "tag17"}, - "18": {Tag: "tag18"}, - }, - MapP: map[string]*Small{ - "19": {Tag: "tag19"}, - "20": nil, - }, - EmptyMap: map[string]Small{}, - Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, - SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, - EmptySlice: []Small{}, - StringSlice: []string{"str24", "str25", "str26"}, - ByteSlice: []byte{27, 28, 29}, - Small: Small{Tag: "tag30"}, - PSmall: &Small{Tag: "tag31"}, - Interface: 5.2, -} - -var pallValue = All{ - PBool: &allValue.Bool, - PInt: &allValue.Int, - PInt8: &allValue.Int8, - PInt16: &allValue.Int16, - PInt32: &allValue.Int32, - PInt64: &allValue.Int64, - PUint: &allValue.Uint, - PUint8: &allValue.Uint8, - PUint16: &allValue.Uint16, - PUint32: &allValue.Uint32, - PUint64: &allValue.Uint64, - PUintptr: &allValue.Uintptr, - PFloat32: &allValue.Float32, - PFloat64: &allValue.Float64, - PString: &allValue.String, - PMap: &allValue.Map, - PMapP: &allValue.MapP, - PSlice: &allValue.Slice, - PSliceP: &allValue.SliceP, - PPSmall: &allValue.PSmall, - PInterface: &allValue.Interface, -} - -var allValueIndent = `{ - "Bool": true, - "Int": 2, - "Int8": 3, - "Int16": 4, - "Int32": 5, - "Int64": 6, - "Uint": 7, - "Uint8": 8, - "Uint16": 9, - "Uint32": 10, - "Uint64": 11, - "Uintptr": 12, - "Float32": 14.1, - "Float64": 15.1, - "bar": "foo", - "bar2": "foo2", - "IntStr": "42", - "PBool": null, - "PInt": null, - "PInt8": null, - "PInt16": null, - "PInt32": null, - "PInt64": null, - "PUint": null, - "PUint8": null, - "PUint16": null, - "PUint32": null, - "PUint64": null, - "PUintptr": null, - "PFloat32": null, - "PFloat64": null, - "String": "16", - "PString": null, - "Map": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "MapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "PMap": null, - "PMapP": null, - "EmptyMap": {}, - "NilMap": null, - "Slice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "SliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "PSlice": null, - 
"PSliceP": null, - "EmptySlice": [], - "NilSlice": null, - "StringSlice": [ - "str24", - "str25", - "str26" - ], - "ByteSlice": "Gxwd", - "Small": { - "Tag": "tag30" - }, - "PSmall": { - "Tag": "tag31" - }, - "PPSmall": null, - "Interface": 5.2, - "PInterface": null -}` - -var allValueCompact = strings.Map(noSpace, allValueIndent) - -var pallValueIndent = `{ - "Bool": false, - "Int": 0, - "Int8": 0, - "Int16": 0, - "Int32": 0, - "Int64": 0, - "Uint": 0, - "Uint8": 0, - "Uint16": 0, - "Uint32": 0, - "Uint64": 0, - "Uintptr": 0, - "Float32": 0, - "Float64": 0, - "bar": "", - "bar2": "", - "IntStr": "0", - "PBool": true, - "PInt": 2, - "PInt8": 3, - "PInt16": 4, - "PInt32": 5, - "PInt64": 6, - "PUint": 7, - "PUint8": 8, - "PUint16": 9, - "PUint32": 10, - "PUint64": 11, - "PUintptr": 12, - "PFloat32": 14.1, - "PFloat64": 15.1, - "String": "", - "PString": "16", - "Map": null, - "MapP": null, - "PMap": { - "17": { - "Tag": "tag17" - }, - "18": { - "Tag": "tag18" - } - }, - "PMapP": { - "19": { - "Tag": "tag19" - }, - "20": null - }, - "EmptyMap": null, - "NilMap": null, - "Slice": null, - "SliceP": null, - "PSlice": [ - { - "Tag": "tag20" - }, - { - "Tag": "tag21" - } - ], - "PSliceP": [ - { - "Tag": "tag22" - }, - null, - { - "Tag": "tag23" - } - ], - "EmptySlice": null, - "NilSlice": null, - "StringSlice": null, - "ByteSlice": null, - "Small": { - "Tag": "" - }, - "PSmall": null, - "PPSmall": { - "Tag": "tag31" - }, - "Interface": null, - "PInterface": 5.2 -}` - -var pallValueCompact = strings.Map(noSpace, pallValueIndent) - -func TestRefUnmarshal(t *testing.T) { - type S struct { - // Ref is defined in encode_test.go. - R0 Ref - R1 *Ref - R2 RefText - R3 *RefText - } - want := S{ - R0: 12, - R1: new(Ref), - R2: 13, - R3: new(RefText), - } - *want.R1 = 12 - *want.R3 = 13 - - var got S - if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("got %+v, want %+v", got, want) - } -} - -// Test that the empty string doesn't panic decoding when ,string is specified -// Issue 3450 -func TestEmptyString(t *testing.T) { - type T2 struct { - Number1 int `json:",string"` - Number2 int `json:",string"` - } - data := `{"Number1":"1", "Number2":""}` - dec := NewDecoder(strings.NewReader(data)) - var t2 T2 - err := dec.Decode(&t2) - if err == nil { - t.Fatal("Decode: did not return error") - } - if t2.Number1 != 1 { - t.Fatal("Decode: did not set Number1") - } -} - -// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). -// It should also not be an error (issue 2540, issue 8587). 
-func TestNullString(t *testing.T) { - type T struct { - A int `json:",string"` - B int `json:",string"` - C *int `json:",string"` - } - data := []byte(`{"A": "1", "B": null, "C": null}`) - var s T - s.B = 1 - s.C = new(int) - *s.C = 2 - err := Unmarshal(data, &s) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if s.B != 1 || s.C != nil { - t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) - } -} - -func intp(x int) *int { - p := new(int) - *p = x - return p -} - -func intpp(x *int) **int { - pp := new(*int) - *pp = x - return pp -} - -var interfaceSetTests = []struct { - pre interface{} - json string - post interface{} -}{ - {"foo", `"bar"`, "bar"}, - {"foo", `2`, 2.0}, - {"foo", `true`, true}, - {"foo", `null`, nil}, - - {nil, `null`, nil}, - {new(int), `null`, nil}, - {(*int)(nil), `null`, nil}, - {new(*int), `null`, new(*int)}, - {(**int)(nil), `null`, nil}, - {intp(1), `null`, nil}, - {intpp(nil), `null`, intpp(nil)}, - {intpp(intp(1)), `null`, intpp(nil)}, -} - -func TestInterfaceSet(t *testing.T) { - for _, tt := range interfaceSetTests { - b := struct{ X interface{} }{tt.pre} - blob := `{"X":` + tt.json + `}` - if err := Unmarshal([]byte(blob), &b); err != nil { - t.Errorf("Unmarshal %#q: %v", blob, err) - continue - } - if !reflect.DeepEqual(b.X, tt.post) { - t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) - } - } -} - -// JSON null values should be ignored for primitives and string values instead of resulting in an error. -// Issue 2540 -func TestUnmarshalNulls(t *testing.T) { - jsonData := []byte(`{ - "Bool" : null, - "Int" : null, - "Int8" : null, - "Int16" : null, - "Int32" : null, - "Int64" : null, - "Uint" : null, - "Uint8" : null, - "Uint16" : null, - "Uint32" : null, - "Uint64" : null, - "Float32" : null, - "Float64" : null, - "String" : null}`) - - nulls := All{ - Bool: true, - Int: 2, - Int8: 3, - Int16: 4, - Int32: 5, - Int64: 6, - Uint: 7, - Uint8: 8, - Uint16: 9, - Uint32: 10, - Uint64: 11, - Float32: 12.1, - Float64: 13.1, - String: "14"} - - err := Unmarshal(jsonData, &nulls) - if err != nil { - t.Errorf("Unmarshal of null values failed: %v", err) - } - if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || - nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || - nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { - - t.Errorf("Unmarshal of null values affected primitives") - } -} - -func TestStringKind(t *testing.T) { - type stringKind string - - var m1, m2 map[stringKind]int - m1 = map[stringKind]int{ - "foo": 42, - } - - data, err := Marshal(m1) - if err != nil { - t.Errorf("Unexpected error marshaling: %v", err) - } - - err = Unmarshal(data, &m2) - if err != nil { - t.Errorf("Unexpected error unmarshaling: %v", err) - } - - if !reflect.DeepEqual(m1, m2) { - t.Error("Items should be equal after encoding and then decoding") - } -} - -// Custom types with []byte as underlying type could not be marshalled -// and then unmarshalled. -// Issue 8962. -func TestByteKind(t *testing.T) { - type byteKind []byte - - a := byteKind("hello") - - data, err := Marshal(a) - if err != nil { - t.Error(err) - } - var b byteKind - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Errorf("expected %v == %v", a, b) - } -} - -// The fix for issue 8962 introduced a regression. -// Issue 12921. 
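Reference note (not part of the patch): the null-handling and byte-slice tests deleted above pin down two stock behaviors. A short sketch against the standard library — the `byteKind` name mirrors the deleted TestByteKind fixture, everything else is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type byteKind []byte // named []byte type, as in the deleted TestByteKind

func main() {
	// Unmarshaling JSON null into a non-pointer value is a no-op:
	// the previous value is left in place rather than zeroed.
	n := 42
	_ = json.Unmarshal([]byte(`null`), &n)
	fmt.Println(n) // 42

	// Named byte-slice types still encode as base64 strings.
	b, _ := json.Marshal(byteKind("hello"))
	fmt.Println(string(b)) // "aGVsbG8="
}
```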
-func TestSliceOfCustomByte(t *testing.T) { - type Uint8 uint8 - - a := []Uint8("hello") - - data, err := Marshal(a) - if err != nil { - t.Fatal(err) - } - var b []Uint8 - err = Unmarshal(data, &b) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a, b) { - t.Fatal("expected %v == %v", a, b) - } -} - -var decodeTypeErrorTests = []struct { - dest interface{} - src string -}{ - {new(string), `{"user": "name"}`}, // issue 4628. - {new(error), `{}`}, // issue 4222 - {new(error), `[]`}, - {new(error), `""`}, - {new(error), `123`}, - {new(error), `true`}, -} - -func TestUnmarshalTypeError(t *testing.T) { - for _, item := range decodeTypeErrorTests { - err := Unmarshal([]byte(item.src), item.dest) - if _, ok := err.(*UnmarshalTypeError); !ok { - t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", - item.src, item.dest, err) - } - } -} - -var unmarshalSyntaxTests = []string{ - "tru", - "fals", - "nul", - "123e", - `"hello`, - `[1,2,3`, - `{"key":1`, - `{"key":1,`, -} - -func TestUnmarshalSyntax(t *testing.T) { - var x interface{} - for _, src := range unmarshalSyntaxTests { - err := Unmarshal([]byte(src), &x) - if _, ok := err.(*SyntaxError); !ok { - t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) - } - } -} - -// Test handling of unexported fields that should be ignored. -// Issue 4660 -type unexportedFields struct { - Name string - m map[string]interface{} `json:"-"` - m2 map[string]interface{} `json:"abcd"` -} - -func TestUnmarshalUnexported(t *testing.T) { - input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` - want := &unexportedFields{Name: "Bob"} - - out := &unexportedFields{} - err := Unmarshal([]byte(input), out) - if err != nil { - t.Errorf("got error %v, expected nil", err) - } - if !reflect.DeepEqual(out, want) { - t.Errorf("got %q, want %q", out, want) - } -} - -// Time3339 is a time.Time which encodes to and from JSON -// as an RFC 3339 time in UTC. -type Time3339 time.Time - -func (t *Time3339) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) - } - tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) - if err != nil { - return err - } - *t = Time3339(tm) - return nil -} - -func TestUnmarshalJSONLiteralError(t *testing.T) { - var t3 Time3339 - err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) - if err == nil { - t.Fatalf("expected error; got time %v", time.Time(t3)) - } - if !strings.Contains(err.Error(), "range") { - t.Errorf("got err = %v; want out of range error", err) - } -} - -// Test that extra object elements in an array do not result in a -// "data changing underfoot" error. -// Issue 3717 -func TestSkipArrayObjects(t *testing.T) { - json := `[{}]` - var dest [0]interface{} - - err := Unmarshal([]byte(json), &dest) - if err != nil { - t.Errorf("got error %q, want nil", err) - } -} - -// Test semantics of pre-filled struct fields and pre-filled map fields. -// Issue 4900. -func TestPrefilled(t *testing.T) { - ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } - - // Values here change, cannot reuse table across runs. 
- var prefillTests = []struct { - in string - ptr interface{} - out interface{} - }{ - { - in: `{"X": 1, "Y": 2}`, - ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, - out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, - }, - { - in: `{"X": 1, "Y": 2}`, - ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), - out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), - }, - } - - for _, tt := range prefillTests { - ptrstr := fmt.Sprintf("%v", tt.ptr) - err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here - if err != nil { - t.Errorf("Unmarshal: %v", err) - } - if !reflect.DeepEqual(tt.ptr, tt.out) { - t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) - } - } -} - -var invalidUnmarshalTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, -} - -func TestInvalidUnmarshal(t *testing.T) { - buf := []byte(`{"a":"1"}`) - for _, tt := range invalidUnmarshalTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -var invalidUnmarshalTextTests = []struct { - v interface{} - want string -}{ - {nil, "json: Unmarshal(nil)"}, - {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, - {(*int)(nil), "json: Unmarshal(nil *int)"}, - {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, -} - -func TestInvalidUnmarshalText(t *testing.T) { - buf := []byte(`123`) - for _, tt := range invalidUnmarshalTextTests { - err := Unmarshal(buf, tt.v) - if err == nil { - t.Errorf("Unmarshal expecting error, got nil") - continue - } - if got := err.Error(); got != tt.want { - t.Errorf("Unmarshal = %q; want %q", got, tt.want) - } - } -} - -// Test that string option is ignored for invalid types. -// Issue 9812. -func TestInvalidStringOption(t *testing.T) { - num := 0 - item := struct { - T time.Time `json:",string"` - M map[string]string `json:",string"` - S []string `json:",string"` - A [1]string `json:",string"` - I interface{} `json:",string"` - P *int `json:",string"` - }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} - - data, err := Marshal(item) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - - err = Unmarshal(data, &item) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go deleted file mode 100644 index c00491e0..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/encode_test.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
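Reference note (not part of the patch): the encode tests deleted below exercise the `omitempty` and `,string` struct-tag options. As a quick reminder of what those options do, a minimal standard-library sketch whose field names echo the deleted Optionals/StringTag fixtures:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Opts is an illustrative struct, not copied from the diff.
type Opts struct {
	So     string `json:"so,omitempty"` // dropped entirely when empty
	IntStr int64  `json:",string"`      // encoded as a quoted number
}

func main() {
	b, _ := json.Marshal(Opts{IntStr: 42})
	fmt.Println(string(b)) // {"IntStr":"42"}
}
```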
- -package json - -import ( - "bytes" - "math" - "reflect" - "testing" - "unicode" -) - -type Optionals struct { - Sr string `json:"sr"` - So string `json:"so,omitempty"` - Sw string `json:"-"` - - Ir int `json:"omitempty"` // actually named omitempty, not an option - Io int `json:"io,omitempty"` - - Slr []string `json:"slr,random"` - Slo []string `json:"slo,omitempty"` - - Mr map[string]interface{} `json:"mr"` - Mo map[string]interface{} `json:",omitempty"` - - Fr float64 `json:"fr"` - Fo float64 `json:"fo,omitempty"` - - Br bool `json:"br"` - Bo bool `json:"bo,omitempty"` - - Ur uint `json:"ur"` - Uo uint `json:"uo,omitempty"` - - Str struct{} `json:"str"` - Sto struct{} `json:"sto,omitempty"` -} - -var optionalsExpected = `{ - "sr": "", - "omitempty": 0, - "slr": null, - "mr": {}, - "fr": 0, - "br": false, - "ur": 0, - "str": {}, - "sto": {} -}` - -func TestOmitEmpty(t *testing.T) { - var o Optionals - o.Sw = "something" - o.Mr = map[string]interface{}{} - o.Mo = map[string]interface{}{} - - got, err := MarshalIndent(&o, "", " ") - if err != nil { - t.Fatal(err) - } - if got := string(got); got != optionalsExpected { - t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected) - } -} - -type StringTag struct { - BoolStr bool `json:",string"` - IntStr int64 `json:",string"` - StrStr string `json:",string"` -} - -var stringTagExpected = `{ - "BoolStr": "true", - "IntStr": "42", - "StrStr": "\"xzbit\"" -}` - -func TestStringTag(t *testing.T) { - var s StringTag - s.BoolStr = true - s.IntStr = 42 - s.StrStr = "xzbit" - got, err := MarshalIndent(&s, "", " ") - if err != nil { - t.Fatal(err) - } - if got := string(got); got != stringTagExpected { - t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected) - } - - // Verify that it round-trips. - var s2 StringTag - err = NewDecoder(bytes.NewReader(got)).Decode(&s2) - if err != nil { - t.Fatalf("Decode: %v", err) - } - if !reflect.DeepEqual(s, s2) { - t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2) - } -} - -// byte slices are special even if they're renamed types. -type renamedByte byte -type renamedByteSlice []byte -type renamedRenamedByteSlice []renamedByte - -func TestEncodeRenamedByteSlice(t *testing.T) { - s := renamedByteSlice("abc") - result, err := Marshal(s) - if err != nil { - t.Fatal(err) - } - expect := `"YWJj"` - if string(result) != expect { - t.Errorf(" got %s want %s", result, expect) - } - r := renamedRenamedByteSlice("abc") - result, err = Marshal(r) - if err != nil { - t.Fatal(err) - } - if string(result) != expect { - t.Errorf(" got %s want %s", result, expect) - } -} - -var unsupportedValues = []interface{}{ - math.NaN(), - math.Inf(-1), - math.Inf(1), -} - -func TestUnsupportedValues(t *testing.T) { - for _, v := range unsupportedValues { - if _, err := Marshal(v); err != nil { - if _, ok := err.(*UnsupportedValueError); !ok { - t.Errorf("for %v, got %T want UnsupportedValueError", v, err) - } - } else { - t.Errorf("for %v, expected error", v) - } - } -} - -// Ref has Marshaler and Unmarshaler methods with pointer receiver. -type Ref int - -func (*Ref) MarshalJSON() ([]byte, error) { - return []byte(`"ref"`), nil -} - -func (r *Ref) UnmarshalJSON([]byte) error { - *r = 12 - return nil -} - -// Val has Marshaler methods with value receiver. -type Val int - -func (Val) MarshalJSON() ([]byte, error) { - return []byte(`"val"`), nil -} - -// RefText has Marshaler and Unmarshaler methods with pointer receiver. 
-type RefText int - -func (*RefText) MarshalText() ([]byte, error) { - return []byte(`"ref"`), nil -} - -func (r *RefText) UnmarshalText([]byte) error { - *r = 13 - return nil -} - -// ValText has Marshaler methods with value receiver. -type ValText int - -func (ValText) MarshalText() ([]byte, error) { - return []byte(`"val"`), nil -} - -func TestRefValMarshal(t *testing.T) { - var s = struct { - R0 Ref - R1 *Ref - R2 RefText - R3 *RefText - V0 Val - V1 *Val - V2 ValText - V3 *ValText - }{ - R0: 12, - R1: new(Ref), - R2: 14, - R3: new(RefText), - V0: 13, - V1: new(Val), - V2: 15, - V3: new(ValText), - } - const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}` - b, err := Marshal(&s) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if got := string(b); got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -// C implements Marshaler and returns unescaped JSON. -type C int - -func (C) MarshalJSON() ([]byte, error) { - return []byte(`"<&>"`), nil -} - -// CText implements Marshaler and returns unescaped text. -type CText int - -func (CText) MarshalText() ([]byte, error) { - return []byte(`"<&>"`), nil -} - -func TestMarshalerEscaping(t *testing.T) { - var c C - want := `"\u003c\u0026\u003e"` - b, err := Marshal(c) - if err != nil { - t.Fatalf("Marshal(c): %v", err) - } - if got := string(b); got != want { - t.Errorf("Marshal(c) = %#q, want %#q", got, want) - } - - var ct CText - want = `"\"\u003c\u0026\u003e\""` - b, err = Marshal(ct) - if err != nil { - t.Fatalf("Marshal(ct): %v", err) - } - if got := string(b); got != want { - t.Errorf("Marshal(ct) = %#q, want %#q", got, want) - } -} - -type IntType int - -type MyStruct struct { - IntType -} - -func TestAnonymousNonstruct(t *testing.T) { - var i IntType = 11 - a := MyStruct{i} - const want = `{"IntType":11}` - - b, err := Marshal(a) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if got := string(b); got != want { - t.Errorf("got %q, want %q", got, want) - } -} - -type BugA struct { - S string -} - -type BugB struct { - BugA - S string -} - -type BugC struct { - S string -} - -// Legal Go: We never use the repeated embedded field (S). -type BugX struct { - A int - BugA - BugB -} - -// Issue 5245. -func TestEmbeddedBug(t *testing.T) { - v := BugB{ - BugA{"A"}, - "B", - } - b, err := Marshal(v) - if err != nil { - t.Fatal("Marshal:", err) - } - want := `{"S":"B"}` - got := string(b) - if got != want { - t.Fatalf("Marshal: got %s want %s", got, want) - } - // Now check that the duplicate field, S, does not appear. - x := BugX{ - A: 23, - } - b, err = Marshal(x) - if err != nil { - t.Fatal("Marshal:", err) - } - want = `{"A":23}` - got = string(b) - if got != want { - t.Fatalf("Marshal: got %s want %s", got, want) - } -} - -type BugD struct { // Same as BugA after tagging. - XXX string `json:"S"` -} - -// BugD's tagged S field should dominate BugA's. -type BugY struct { - BugA - BugD -} - -// Test that a field with a tag dominates untagged fields. -func TestTaggedFieldDominates(t *testing.T) { - v := BugY{ - BugA{"BugA"}, - BugD{"BugD"}, - } - b, err := Marshal(v) - if err != nil { - t.Fatal("Marshal:", err) - } - want := `{"S":"BugD"}` - got := string(b) - if got != want { - t.Fatalf("Marshal: got %s want %s", got, want) - } -} - -// There are no tags here, so S should not appear. -type BugZ struct { - BugA - BugC - BugY // Contains a tagged S field through BugD; should not dominate. 
-} - -func TestDuplicatedFieldDisappears(t *testing.T) { - v := BugZ{ - BugA{"BugA"}, - BugC{"BugC"}, - BugY{ - BugA{"nested BugA"}, - BugD{"nested BugD"}, - }, - } - b, err := Marshal(v) - if err != nil { - t.Fatal("Marshal:", err) - } - want := `{}` - got := string(b) - if got != want { - t.Fatalf("Marshal: got %s want %s", got, want) - } -} - -func TestStringBytes(t *testing.T) { - // Test that encodeState.stringBytes and encodeState.string use the same encoding. - es := &encodeState{} - var r []rune - for i := '\u0000'; i <= unicode.MaxRune; i++ { - r = append(r, i) - } - s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too - es.string(s) - - esBytes := &encodeState{} - esBytes.stringBytes([]byte(s)) - - enc := es.Buffer.String() - encBytes := esBytes.Buffer.String() - if enc != encBytes { - i := 0 - for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] { - i++ - } - enc = enc[i:] - encBytes = encBytes[i:] - i = 0 - for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] { - i++ - } - enc = enc[:len(enc)-i] - encBytes = encBytes[:len(encBytes)-i] - - if len(enc) > 20 { - enc = enc[:20] + "..." - } - if len(encBytes) > 20 { - encBytes = encBytes[:20] + "..." - } - - t.Errorf("encodings differ at %#q vs %#q", enc, encBytes) - } -} - -func TestIssue6458(t *testing.T) { - type Foo struct { - M RawMessage - } - x := Foo{RawMessage(`"foo"`)} - - b, err := Marshal(&x) - if err != nil { - t.Fatal(err) - } - if want := `{"M":"foo"}`; string(b) != want { - t.Errorf("Marshal(&x) = %#q; want %#q", b, want) - } - - b, err = Marshal(x) - if err != nil { - t.Fatal(err) - } - - if want := `{"M":"ImZvbyI="}`; string(b) != want { - t.Errorf("Marshal(x) = %#q; want %#q", b, want) - } -} - -func TestIssue10281(t *testing.T) { - type Foo struct { - N Number - } - x := Foo{Number(`invalid`)} - - b, err := Marshal(&x) - if err == nil { - t.Errorf("Marshal(&x) = %#q; want error", b) - } -} - -func TestHTMLEscape(t *testing.T) { - var b, want bytes.Buffer - m := `{"M":"foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `"}` - want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`)) - HTMLEscape(&b, []byte(m)) - if !bytes.Equal(b.Bytes(), want.Bytes()) { - t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes()) - } -} - -// golang.org/issue/8582 -func TestEncodePointerString(t *testing.T) { - type stringPointer struct { - N *int64 `json:"n,string"` - } - var n int64 = 42 - b, err := Marshal(stringPointer{N: &n}) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if got, want := string(b), `{"n":"42"}`; got != want { - t.Errorf("Marshal = %s, want %s", got, want) - } - var back stringPointer - err = Unmarshal(b, &back) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if back.N == nil { - t.Fatalf("Unmarshalled nil N field") - } - if *back.N != 42 { - t.Fatalf("*N = %d; want 42", *back.N) - } -} - -var encodeStringTests = []struct { - in string - out string -}{ - {"\x00", `"\u0000"`}, - {"\x01", `"\u0001"`}, - {"\x02", `"\u0002"`}, - {"\x03", `"\u0003"`}, - {"\x04", `"\u0004"`}, - {"\x05", `"\u0005"`}, - {"\x06", `"\u0006"`}, - {"\x07", `"\u0007"`}, - {"\x08", `"\u0008"`}, - {"\x09", `"\t"`}, - {"\x0a", `"\n"`}, - {"\x0b", `"\u000b"`}, - {"\x0c", `"\u000c"`}, - {"\x0d", `"\r"`}, - {"\x0e", `"\u000e"`}, - {"\x0f", `"\u000f"`}, - {"\x10", `"\u0010"`}, - {"\x11", `"\u0011"`}, - {"\x12", `"\u0012"`}, - {"\x13", `"\u0013"`}, - {"\x14", `"\u0014"`}, - {"\x15", `"\u0015"`}, - {"\x16", `"\u0016"`}, - {"\x17", 
`"\u0017"`}, - {"\x18", `"\u0018"`}, - {"\x19", `"\u0019"`}, - {"\x1a", `"\u001a"`}, - {"\x1b", `"\u001b"`}, - {"\x1c", `"\u001c"`}, - {"\x1d", `"\u001d"`}, - {"\x1e", `"\u001e"`}, - {"\x1f", `"\u001f"`}, -} - -func TestEncodeString(t *testing.T) { - for _, tt := range encodeStringTests { - b, err := Marshal(tt.in) - if err != nil { - t.Errorf("Marshal(%q): %v", tt.in, err) - continue - } - out := string(b) - if out != tt.out { - t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out) - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/number_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/number_test.go deleted file mode 100644 index 4e63cf9c..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/number_test.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "regexp" - "testing" -) - -func TestNumberIsValid(t *testing.T) { - // From: http://stackoverflow.com/a/13340826 - var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`) - - validTests := []string{ - "0", - "-0", - "1", - "-1", - "0.1", - "-0.1", - "1234", - "-1234", - "12.34", - "-12.34", - "12E0", - "12E1", - "12e34", - "12E-0", - "12e+1", - "12e-34", - "-12E0", - "-12E1", - "-12e34", - "-12E-0", - "-12e+1", - "-12e-34", - "1.2E0", - "1.2E1", - "1.2e34", - "1.2E-0", - "1.2e+1", - "1.2e-34", - "-1.2E0", - "-1.2E1", - "-1.2e34", - "-1.2E-0", - "-1.2e+1", - "-1.2e-34", - "0E0", - "0E1", - "0e34", - "0E-0", - "0e+1", - "0e-34", - "-0E0", - "-0E1", - "-0e34", - "-0E-0", - "-0e+1", - "-0e-34", - } - - for _, test := range validTests { - if !isValidNumber(test) { - t.Errorf("%s should be valid", test) - } - - var f float64 - if err := Unmarshal([]byte(test), &f); err != nil { - t.Errorf("%s should be valid but Unmarshal failed: %v", test, err) - } - - if !jsonNumberRegexp.MatchString(test) { - t.Errorf("%s should be valid but regexp does not match", test) - } - } - - invalidTests := []string{ - "", - "invalid", - "1.0.1", - "1..1", - "-1-2", - "012a42", - "01.2", - "012", - "12E12.12", - "1e2e3", - "1e+-2", - "1e--23", - "1e", - "e1", - "1e+", - "1ea", - "1a", - "1.a", - "1.", - "01", - "1.e1", - } - - for _, test := range invalidTests { - if isValidNumber(test) { - t.Errorf("%s should be invalid", test) - } - - var f float64 - if err := Unmarshal([]byte(test), &f); err == nil { - t.Errorf("%s should be invalid but unmarshal wrote %v", test, f) - } - - if jsonNumberRegexp.MatchString(test) { - t.Errorf("%s should be invalid but matches regexp", test) - } - } -} - -func BenchmarkNumberIsValid(b *testing.B) { - s := "-61657.61667E+61673" - for i := 0; i < b.N; i++ { - isValidNumber(s) - } -} - -func BenchmarkNumberIsValidRegexp(b *testing.B) { - var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`) - s := "-61657.61667E+61673" - for i := 0; i < b.N; i++ { - jsonNumberRegexp.MatchString(s) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go deleted file mode 100644 index 66383ef0..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/scanner_test.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "math" - "math/rand" - "reflect" - "testing" -) - -// Tests of simple examples. - -type example struct { - compact string - indent string -} - -var examples = []example{ - {`1`, `1`}, - {`{}`, `{}`}, - {`[]`, `[]`}, - {`{"":2}`, "{\n\t\"\": 2\n}"}, - {`[3]`, "[\n\t3\n]"}, - {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"}, - {`{"x":1}`, "{\n\t\"x\": 1\n}"}, - {ex1, ex1i}, -} - -var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]` - -var ex1i = `[ - true, - false, - null, - "x", - 1, - 1.5, - 0, - -5e+2 -]` - -func TestCompact(t *testing.T) { - var buf bytes.Buffer - for _, tt := range examples { - buf.Reset() - if err := Compact(&buf, []byte(tt.compact)); err != nil { - t.Errorf("Compact(%#q): %v", tt.compact, err) - } else if s := buf.String(); s != tt.compact { - t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s) - } - - buf.Reset() - if err := Compact(&buf, []byte(tt.indent)); err != nil { - t.Errorf("Compact(%#q): %v", tt.indent, err) - continue - } else if s := buf.String(); s != tt.compact { - t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact) - } - } -} - -func TestCompactSeparators(t *testing.T) { - // U+2028 and U+2029 should be escaped inside strings. - // They should not appear outside strings. - tests := []struct { - in, compact string - }{ - {"{\"\u2028\": 1}", `{"\u2028":1}`}, - {"{\"\u2029\" :2}", `{"\u2029":2}`}, - } - for _, tt := range tests { - var buf bytes.Buffer - if err := Compact(&buf, []byte(tt.in)); err != nil { - t.Errorf("Compact(%q): %v", tt.in, err) - } else if s := buf.String(); s != tt.compact { - t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact) - } - } -} - -func TestIndent(t *testing.T) { - var buf bytes.Buffer - for _, tt := range examples { - buf.Reset() - if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil { - t.Errorf("Indent(%#q): %v", tt.indent, err) - } else if s := buf.String(); s != tt.indent { - t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s) - } - - buf.Reset() - if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil { - t.Errorf("Indent(%#q): %v", tt.compact, err) - continue - } else if s := buf.String(); s != tt.indent { - t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent) - } - } -} - -// Tests of a large random structure. 
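Reference note (not part of the patch): the scanner tests above assert that Compact and Indent invert each other on the `ex1` fixture. A minimal standard-library sketch of that round trip, reusing the same fixture:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`[true,false,null,"x",1,1.5,0,-5e+2]`) // ex1 from the deleted tests

	// Indent pretty-prints; Compact reverses it, so the pair round-trips.
	var indented, compacted bytes.Buffer
	if err := json.Indent(&indented, src, "", "\t"); err != nil {
		panic(err)
	}
	if err := json.Compact(&compacted, indented.Bytes()); err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(compacted.Bytes(), src)) // true
}
```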
- -func TestCompactBig(t *testing.T) { - initBig() - var buf bytes.Buffer - if err := Compact(&buf, jsonBig); err != nil { - t.Fatalf("Compact: %v", err) - } - b := buf.Bytes() - if !bytes.Equal(b, jsonBig) { - t.Error("Compact(jsonBig) != jsonBig") - diff(t, b, jsonBig) - return - } -} - -func TestIndentBig(t *testing.T) { - initBig() - var buf bytes.Buffer - if err := Indent(&buf, jsonBig, "", "\t"); err != nil { - t.Fatalf("Indent1: %v", err) - } - b := buf.Bytes() - if len(b) == len(jsonBig) { - // jsonBig is compact (no unnecessary spaces); - // indenting should make it bigger - t.Fatalf("Indent(jsonBig) did not get bigger") - } - - // should be idempotent - var buf1 bytes.Buffer - if err := Indent(&buf1, b, "", "\t"); err != nil { - t.Fatalf("Indent2: %v", err) - } - b1 := buf1.Bytes() - if !bytes.Equal(b1, b) { - t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)") - diff(t, b1, b) - return - } - - // should get back to original - buf1.Reset() - if err := Compact(&buf1, b); err != nil { - t.Fatalf("Compact: %v", err) - } - b1 = buf1.Bytes() - if !bytes.Equal(b1, jsonBig) { - t.Error("Compact(Indent(jsonBig)) != jsonBig") - diff(t, b1, jsonBig) - return - } -} - -type indentErrorTest struct { - in string - err error -} - -var indentErrorTests = []indentErrorTest{ - {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}}, - {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}}, -} - -func TestIndentErrors(t *testing.T) { - for i, tt := range indentErrorTests { - slice := make([]uint8, 0) - buf := bytes.NewBuffer(slice) - if err := Indent(buf, []uint8(tt.in), "", ""); err != nil { - if !reflect.DeepEqual(err, tt.err) { - t.Errorf("#%d: Indent: %#v", i, err) - continue - } - } - } -} - -func TestNextValueBig(t *testing.T) { - initBig() - var scan scanner - item, rest, err := nextValue(jsonBig, &scan) - if err != nil { - t.Fatalf("nextValue: %s", err) - } - if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] { - t.Errorf("invalid item: %d %d", len(item), len(jsonBig)) - } - if len(rest) != 0 { - t.Errorf("invalid rest: %d", len(rest)) - } - - item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan) - if err != nil { - t.Fatalf("nextValue extra: %s", err) - } - if len(item) != len(jsonBig) { - t.Errorf("invalid item: %d %d", len(item), len(jsonBig)) - } - if string(rest) != "HELLO WORLD" { - t.Errorf("invalid rest: %d", len(rest)) - } -} - -var benchScan scanner - -func BenchmarkSkipValue(b *testing.B) { - initBig() - b.ResetTimer() - for i := 0; i < b.N; i++ { - nextValue(jsonBig, &benchScan) - } - b.SetBytes(int64(len(jsonBig))) -} - -func diff(t *testing.T, a, b []byte) { - for i := 0; ; i++ { - if i >= len(a) || i >= len(b) || a[i] != b[i] { - j := i - 10 - if j < 0 { - j = 0 - } - t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:])) - return - } - } -} - -func trim(b []byte) []byte { - if len(b) > 20 { - return b[0:20] - } - return b -} - -// Generate a random JSON object. 
- -var jsonBig []byte - -func initBig() { - n := 10000 - if testing.Short() { - n = 100 - } - b, err := Marshal(genValue(n)) - if err != nil { - panic(err) - } - jsonBig = b -} - -func genValue(n int) interface{} { - if n > 1 { - switch rand.Intn(2) { - case 0: - return genArray(n) - case 1: - return genMap(n) - } - } - switch rand.Intn(3) { - case 0: - return rand.Intn(2) == 0 - case 1: - return rand.NormFloat64() - case 2: - return genString(30) - } - panic("unreachable") -} - -func genString(stddev float64) string { - n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2)) - c := make([]rune, n) - for i := range c { - f := math.Abs(rand.NormFloat64()*64 + 32) - if f > 0x10ffff { - f = 0x10ffff - } - c[i] = rune(f) - } - return string(c) -} - -func genArray(n int) []interface{} { - f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2))) - if f > n { - f = n - } - if f < 1 { - f = 1 - } - x := make([]interface{}, f) - for i := range x { - x[i] = genValue(((i+1)*n)/f - (i*n)/f) - } - return x -} - -func genMap(n int) map[string]interface{} { - f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2))) - if f > n { - f = n - } - if n > 0 && f == 0 { - f = 1 - } - x := make(map[string]interface{}) - for i := 0; i < f; i++ { - x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f) - } - return x -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go deleted file mode 100644 index eccf365b..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/stream_test.go +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "bytes" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" -) - -// Test values for the stream test. -// One of each JSON kind. -var streamTest = []interface{}{ - 0.1, - "hello", - nil, - true, - false, - []interface{}{"a", "b", "c"}, - map[string]interface{}{"K": "Kelvin", "ß": "long s"}, - 3.14, // another value to make sure something can follow map -} - -var streamEncoded = `0.1 -"hello" -null -true -false -["a","b","c"] -{"ß":"long s","K":"Kelvin"} -3.14 -` - -func TestEncoder(t *testing.T) { - for i := 0; i <= len(streamTest); i++ { - var buf bytes.Buffer - enc := NewEncoder(&buf) - for j, v := range streamTest[0:i] { - if err := enc.Encode(v); err != nil { - t.Fatalf("encode #%d: %v", j, err) - } - } - if have, want := buf.String(), nlines(streamEncoded, i); have != want { - t.Errorf("encoding %d items: mismatch", i) - diff(t, []byte(have), []byte(want)) - break - } - } -} - -func TestDecoder(t *testing.T) { - for i := 0; i <= len(streamTest); i++ { - // Use stream without newlines as input, - // just to stress the decoder even more. - // Our test input does not include back-to-back numbers. - // Otherwise stripping the newlines would - // merge two adjacent JSON values. 
- var buf bytes.Buffer - for _, c := range nlines(streamEncoded, i) { - if c != '\n' { - buf.WriteRune(c) - } - } - out := make([]interface{}, i) - dec := NewDecoder(&buf) - for j := range out { - if err := dec.Decode(&out[j]); err != nil { - t.Fatalf("decode #%d/%d: %v", j, i, err) - } - } - if !reflect.DeepEqual(out, streamTest[0:i]) { - t.Errorf("decoding %d items: mismatch", i) - for j := range out { - if !reflect.DeepEqual(out[j], streamTest[j]) { - t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j]) - } - } - break - } - } -} - -func TestDecoderBuffered(t *testing.T) { - r := strings.NewReader(`{"Name": "Gopher"} extra `) - var m struct { - Name string - } - d := NewDecoder(r) - err := d.Decode(&m) - if err != nil { - t.Fatal(err) - } - if m.Name != "Gopher" { - t.Errorf("Name = %q; want Gopher", m.Name) - } - rest, err := ioutil.ReadAll(d.Buffered()) - if err != nil { - t.Fatal(err) - } - if g, w := string(rest), " extra "; g != w { - t.Errorf("Remaining = %q; want %q", g, w) - } -} - -func nlines(s string, n int) string { - if n <= 0 { - return "" - } - for i, c := range s { - if c == '\n' { - if n--; n == 0 { - return s[0 : i+1] - } - } - } - return s -} - -func TestRawMessage(t *testing.T) { - // TODO(rsc): Should not need the * in *RawMessage - var data struct { - X float64 - Id *RawMessage - Y float32 - } - const raw = `["\u0056",null]` - const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}` - err := Unmarshal([]byte(msg), &data) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if string([]byte(*data.Id)) != raw { - t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw) - } - b, err := Marshal(&data) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if string(b) != msg { - t.Fatalf("Marshal: have %#q want %#q", b, msg) - } -} - -func TestNullRawMessage(t *testing.T) { - // TODO(rsc): Should not need the * in *RawMessage - var data struct { - X float64 - Id *RawMessage - Y float32 - } - data.Id = new(RawMessage) - const msg = `{"X":0.1,"Id":null,"Y":0.2}` - err := Unmarshal([]byte(msg), &data) - if err != nil { - t.Fatalf("Unmarshal: %v", err) - } - if data.Id != nil { - t.Fatalf("Raw mismatch: have non-nil, want nil") - } - b, err := Marshal(&data) - if err != nil { - t.Fatalf("Marshal: %v", err) - } - if string(b) != msg { - t.Fatalf("Marshal: have %#q want %#q", b, msg) - } -} - -var blockingTests = []string{ - `{"x": 1}`, - `[1, 2, 3]`, -} - -func TestBlocking(t *testing.T) { - for _, enc := range blockingTests { - r, w := net.Pipe() - go w.Write([]byte(enc)) - var val interface{} - - // If Decode reads beyond what w.Write writes above, - // it will block, and the test will deadlock. 
- if err := NewDecoder(r).Decode(&val); err != nil { - t.Errorf("decoding %s: %v", enc, err) - } - r.Close() - w.Close() - } -} - -func BenchmarkEncoderEncode(b *testing.B) { - b.ReportAllocs() - type T struct { - X, Y string - } - v := &T{"foo", "bar"} - for i := 0; i < b.N; i++ { - if err := NewEncoder(ioutil.Discard).Encode(v); err != nil { - b.Fatal(err) - } - } -} - -type tokenStreamCase struct { - json string - expTokens []interface{} -} - -type decodeThis struct { - v interface{} -} - -var tokenStreamCases []tokenStreamCase = []tokenStreamCase{ - // streaming token cases - {json: `10`, expTokens: []interface{}{float64(10)}}, - {json: ` [10] `, expTokens: []interface{}{ - Delim('['), float64(10), Delim(']')}}, - {json: ` [false,10,"b"] `, expTokens: []interface{}{ - Delim('['), false, float64(10), "b", Delim(']')}}, - {json: `{ "a": 1 }`, expTokens: []interface{}{ - Delim('{'), "a", float64(1), Delim('}')}}, - {json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{ - Delim('{'), "a", float64(1), "b", "3", Delim('}')}}, - {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{ - Delim('['), - Delim('{'), "a", float64(1), Delim('}'), - Delim('{'), "a", float64(2), Delim('}'), - Delim(']')}}, - {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{ - Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'), - Delim('}')}}, - {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{ - Delim('{'), "obj", Delim('['), - Delim('{'), "a", float64(1), Delim('}'), - Delim(']'), Delim('}')}}, - - // streaming tokens with intermittent Decode() - {json: `{ "a": 1 }`, expTokens: []interface{}{ - Delim('{'), "a", - decodeThis{float64(1)}, - Delim('}')}}, - {json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{ - Delim('['), - decodeThis{map[string]interface{}{"a": float64(1)}}, - Delim(']')}}, - {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{ - Delim('['), - decodeThis{map[string]interface{}{"a": float64(1)}}, - decodeThis{map[string]interface{}{"a": float64(2)}}, - Delim(']')}}, - {json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{ - Delim('{'), "obj", Delim('['), - decodeThis{map[string]interface{}{"a": float64(1)}}, - Delim(']'), Delim('}')}}, - - {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{ - Delim('{'), "obj", - decodeThis{map[string]interface{}{"a": float64(1)}}, - Delim('}')}}, - {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{ - Delim('{'), "obj", - decodeThis{[]interface{}{ - map[string]interface{}{"a": float64(1)}, - }}, - Delim('}')}}, - {json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{ - Delim('['), - decodeThis{map[string]interface{}{"a": float64(1)}}, - decodeThis{&SyntaxError{"expected comma after array element", 0}}, - }}, - {json: `{ "a" 1 }`, expTokens: []interface{}{ - Delim('{'), "a", - decodeThis{&SyntaxError{"expected colon after object key", 0}}, - }}, -} - -func TestDecodeInStream(t *testing.T) { - - for ci, tcase := range tokenStreamCases { - - dec := NewDecoder(strings.NewReader(tcase.json)) - for i, etk := range tcase.expTokens { - - var tk interface{} - var err error - - if dt, ok := etk.(decodeThis); ok { - etk = dt.v - err = dec.Decode(&tk) - } else { - tk, err = dec.Token() - } - if experr, ok := etk.(error); ok { - if err == nil || err.Error() != experr.Error() { - t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err) - } - break - } else if err == io.EOF { - t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json) - break - } else if err != nil { - t.Errorf("case %v: Unexpected error '%v' in 
%q", ci, err, tcase.json) - break - } - if !reflect.DeepEqual(tk, etk) { - t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk) - break - } - } - } - -} - -// Test from golang.org/issue/11893 -func TestHTTPDecoding(t *testing.T) { - const raw = `{ "foo": "bar" }` - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(raw)) - })) - defer ts.Close() - res, err := http.Get(ts.URL) - if err != nil { - log.Fatalf("GET failed: %v", err) - } - defer res.Body.Close() - - foo := struct { - Foo string `json:"foo"` - }{} - - d := NewDecoder(res.Body) - err = d.Decode(&foo) - if err != nil { - t.Fatalf("Decode: %v", err) - } - if foo.Foo != "bar" { - t.Errorf("decoded %q; want \"bar\"", foo.Foo) - } - - // make sure we get the EOF the second time - err = d.Decode(&foo) - if err != io.EOF { - t.Errorf("err = %v; want io.EOF", err) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go deleted file mode 100644 index 85bb4ba8..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tagkey_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "testing" -) - -type basicLatin2xTag struct { - V string `json:"$%-/"` -} - -type basicLatin3xTag struct { - V string `json:"0123456789"` -} - -type basicLatin4xTag struct { - V string `json:"ABCDEFGHIJKLMO"` -} - -type basicLatin5xTag struct { - V string `json:"PQRSTUVWXYZ_"` -} - -type basicLatin6xTag struct { - V string `json:"abcdefghijklmno"` -} - -type basicLatin7xTag struct { - V string `json:"pqrstuvwxyz"` -} - -type miscPlaneTag struct { - V string `json:"色は匂へど"` -} - -type percentSlashTag struct { - V string `json:"text/html%"` // https://golang.org/issue/2718 -} - -type punctuationTag struct { - V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546 -} - -type emptyTag struct { - W string -} - -type misnamedTag struct { - X string `jsom:"Misnamed"` -} - -type badFormatTag struct { - Y string `:"BadFormat"` -} - -type badCodeTag struct { - Z string `json:" !\"#&'()*+,."` -} - -type spaceTag struct { - Q string `json:"With space"` -} - -type unicodeTag struct { - W string `json:"Ελλάδα"` -} - -var structTagObjectKeyTests = []struct { - raw interface{} - value string - key string -}{ - {basicLatin2xTag{"2x"}, "2x", "$%-/"}, - {basicLatin3xTag{"3x"}, "3x", "0123456789"}, - {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"}, - {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"}, - {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"}, - {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"}, - {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"}, - {emptyTag{"Pour Moi"}, "Pour Moi", "W"}, - {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"}, - {badFormatTag{"Orfevre"}, "Orfevre", "Y"}, - {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"}, - {percentSlashTag{"brut"}, "brut", "text/html%"}, - {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"}, - {spaceTag{"Perreddu"}, "Perreddu", "With space"}, - {unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"}, -} - -func TestStructTagObjectKey(t *testing.T) { - for _, tt := range structTagObjectKeyTests { - b, err := Marshal(tt.raw) - if err != nil { - t.Fatalf("Marshal(%#q) failed: %v", 
tt.raw, err) - } - var f interface{} - err = Unmarshal(b, &f) - if err != nil { - t.Fatalf("Unmarshal(%#q) failed: %v", b, err) - } - for i, v := range f.(map[string]interface{}) { - switch i { - case tt.key: - if s, ok := v.(string); !ok || s != tt.value { - t.Fatalf("Unexpected value: %#q, want %v", s, tt.value) - } - default: - t.Fatalf("Unexpected key: %#q, from %#q", i, b) - } - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go deleted file mode 100644 index 91fb1883..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/tags_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package json - -import ( - "testing" -) - -func TestTagParsing(t *testing.T) { - name, opts := parseTag("field,foobar,foo") - if name != "field" { - t.Fatalf("name = %q, want field", name) - } - for _, tt := range []struct { - opt string - want bool - }{ - {"foobar", true}, - {"foo", true}, - {"bar", false}, - } { - if opts.Contains(tt.opt) != tt.want { - t.Errorf("Contains(%q) = %v", tt.opt, !tt.want) - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz deleted file mode 100644 index 0e2895b5..00000000 Binary files a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json/testdata/code.json.gz and /dev/null differ diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go deleted file mode 100644 index 78e59dc0..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_fork_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !std_json - -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "testing" -) - -type CaseSensitive struct { - A int `json:"Test"` - B int `json:"test"` - C int `json:"TEST"` -} - -func TestCaseSensitiveJSON(t *testing.T) { - raw := []byte(`{"test":42}`) - var cs CaseSensitive - err := UnmarshalJSON(raw, &cs) - if err != nil { - t.Error(err) - } - - if cs.A != 0 || cs.B != 42 || cs.C != 0 { - t.Errorf("parsing JSON should be case-sensitive (got %v)", cs) - } -} - -func TestRejectDuplicateKeysObject(t *testing.T) { - raw := []byte(`{"test":42,"test":43}`) - var cs CaseSensitive - err := UnmarshalJSON(raw, &cs) - if err == nil { - t.Error("should reject JSON with duplicate keys, but didn't") - } -} - -func TestRejectDuplicateKeysInterface(t *testing.T) { - raw := []byte(`{"test":42,"test":43}`) - var m interface{} - err := UnmarshalJSON(raw, &m) - if err == nil { - t.Error("should reject JSON with duplicate keys, but didn't") - } -} - -func TestParseCaseSensitiveJWE(t *testing.T) { - invalidJWE := `{"protected":"eyJlbmMiOiJYWVoiLCJBTEciOiJYWVoifQo","encrypted_key":"QUJD","iv":"QUJD","ciphertext":"QUJD","tag":"QUJD"}` - _, err := ParseEncrypted(invalidJWE) - if err == nil { - t.Error("Able to parse message with case-invalid headers", invalidJWE) - } -} - -func TestParseCaseSensitiveJWS(t *testing.T) { - invalidJWS := `{"PAYLOAD":"CUJD","signatures":[{"protected":"e30","signature":"CUJD"}]}` - _, err := ParseSigned(invalidJWS) - if err == nil { - t.Error("Able to parse message with case-invalid headers", invalidJWS) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std_test.go deleted file mode 100644 index 9819a614..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json_std_test.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build std_json - -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "testing" -) - -type CaseInsensitive struct { - A int `json:"TEST"` -} - -func TestCaseInsensitiveJSON(t *testing.T) { - raw := []byte(`{"test":42}`) - var ci CaseInsensitive - err := UnmarshalJSON(raw, &ci) - if err != nil { - t.Error(err) - } - - if ci.A != 42 { - t.Errorf("parsing JSON should be case-insensitive (got %v)", ci) - } -} - -func TestParseCaseInsensitiveJWE(t *testing.T) { - invalidJWE := `{"protected":"eyJlbmMiOiJYWVoiLCJBTEciOiJYWVoifQo","encrypted_key":"QUJD","iv":"QUJD","ciphertext":"QUJD","tag":"QUJD"}` - _, err := ParseEncrypted(invalidJWE) - if err != nil { - t.Error("Unable to parse message with case-invalid headers", invalidJWE) - } -} - -func TestParseCaseInsensitiveJWS(t *testing.T) { - invalidJWS := `{"PAYLOAD":"CUJD","signatures":[{"protected":"e30","signature":"CUJD"}]}` - _, err := ParseSigned(invalidJWS) - if err != nil { - t.Error("Unable to parse message with case-invalid headers", invalidJWS) - } -} - -var JWKSetDuplicates = stripWhitespace(`{ - "keys": [{ - "kty": "RSA", - "kid": "exclude-me", - "use": "sig", - "n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT - -O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqV - wGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj- - oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde - 3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuC - LqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5g - HdrNP5zw", - "e": "AQAB" - }], - "keys": [{ - "kty": "RSA", - "kid": "include-me", - "use": "sig", - "n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT - -O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqV - wGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj- - oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde - 3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuC - LqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5g - HdrNP5zw", - "e": "AQAB" - }], - "custom": "exclude-me", - "custom": "include-me" - }`) - -func TestDuplicateJWKSetMembersIgnored(t *testing.T) { - type CustomSet struct { - JsonWebKeySet - CustomMember string `json:"custom"` - } - data := []byte(JWKSetDuplicates) - var set CustomSet - UnmarshalJSON(data, &set) - if len(set.Keys) != 1 { - t.Error("expected only one key in set") - } - if set.Keys[0].KeyID != "include-me" { - t.Errorf("expected key with kid: \"include-me\", got: %s", set.Keys[0].KeyID) - } - if set.CustomMember != "include-me" { - t.Errorf("expected custom member value: \"include-me\", got: %s", set.CustomMember) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe_test.go deleted file mode 100644 index ab03fd00..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwe_test.go +++ /dev/null @@ -1,537 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "math/big" - "testing" -) - -func TestCompactParseJWE(t *testing.T) { - // Should parse - msg := "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.dGVzdA" - _, err := ParseEncrypted(msg) - if err != nil { - t.Error("Unable to parse valid message:", err) - } - - // Messages that should fail to parse - failures := []string{ - // Too many parts - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.dGVzdA.dGVzdA", - // Not enough parts - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA", - // Invalid encrypted key - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.//////.dGVzdA.dGVzdA.dGVzdA", - // Invalid IV - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.//////.dGVzdA.dGVzdA", - // Invalid ciphertext - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.//////.dGVzdA", - // Invalid tag - "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.dGVzdA.dGVzdA.dGVzdA.//////", - // Invalid header - "W10.dGVzdA.dGVzdA.dGVzdA.dGVzdA", - // Invalid header - "######.dGVzdA.dGVzdA.dGVzdA.dGVzdA", - // Missing alc/enc params - "e30.dGVzdA.dGVzdA.dGVzdA.dGVzdA", - } - - for _, msg := range failures { - _, err = ParseEncrypted(msg) - if err == nil { - t.Error("Able to parse invalid message", msg) - } - } -} - -func TestFullParseJWE(t *testing.T) { - // Messages that should succeed to parse - successes := []string{ - // Flattened serialization, single recipient - "{\"protected\":\"eyJhbGciOiJYWVoiLCJlbmMiOiJYWVoifQo\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - // Unflattened serialization, single recipient - "{\"protected\":\"\",\"unprotected\":{\"enc\":\"XYZ\"},\"recipients\":[{\"header\":{\"alg\":\"XYZ\"},\"encrypted_key\":\"QUJD\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - } - - for i := range successes { - _, err := ParseEncrypted(successes[i]) - if err != nil { - t.Error("Unble to parse valid message", err, successes[i]) - } - } - - // Messages that should fail to parse - failures := []string{ - // Empty - "{}", - // Invalid JSON - "{XX", - // Invalid protected header - "{\"protected\":\"###\"}", - // Invalid protected header - "{\"protected\":\"e1gK\"}", - // Invalid encrypted key - "{\"protected\":\"e30\",\"encrypted_key\":\"###\"}", - // Invalid IV - "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"###\"}", - // Invalid ciphertext - "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"###\"}", - // Invalid tag - "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"###\"}", - // Invalid AAD - "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\",\"aad\":\"###\"}", - // Missing alg/enc headers - "{\"protected\":\"e30\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - // Missing enc header - "{\"protected\":\"eyJhbGciOiJYWVoifQ\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - // Missing alg header - "{\"protected\":\"eyJlbmMiOiJYWVoifQ\",\"encrypted_key\":\"QUJD\",\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - // Unflattened serialization, single recipient, invalid encrypted_key - "{\"protected\":\"\",\"recipients\":[{\"header\":{\"alg\":\"XYZ\", 
\"enc\":\"XYZ\"},\"encrypted_key\":\"###\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - // Unflattened serialization, single recipient, missing alg - "{\"protected\":\"eyJhbGciOiJYWVoifQ\",\"recipients\":[{\"encrypted_key\":\"QUJD\"}],\"iv\":\"QUJD\",\"ciphertext\":\"QUJD\",\"tag\":\"QUJD\"}", - } - - for i := range failures { - _, err := ParseEncrypted(failures[i]) - if err == nil { - t.Error("Able to parse invalid message", err, failures[i]) - } - } -} - -func TestMissingInvalidHeaders(t *testing.T) { - obj := &JsonWebEncryption{ - protected: &rawHeader{Enc: A128GCM}, - unprotected: &rawHeader{}, - recipients: []recipientInfo{ - recipientInfo{}, - }, - } - - _, err := obj.Decrypt(nil) - if err != ErrUnsupportedKeyType { - t.Error("should detect invalid key") - } - - obj.unprotected.Crit = []string{"1", "2"} - - _, err = obj.Decrypt(nil) - if err == nil { - t.Error("should reject message with crit header") - } - - obj.unprotected.Crit = nil - obj.protected = &rawHeader{Alg: string(RSA1_5)} - - _, err = obj.Decrypt(rsaTestKey) - if err == nil || err == ErrCryptoFailure { - t.Error("should detect missing enc header") - } -} - -func TestRejectUnprotectedJWENonce(t *testing.T) { - // No need to test compact, since that's always protected - - // Flattened JSON - input := `{ - "header": { - "alg": "XYZ", "enc": "XYZ", - "nonce": "should-cause-an-error" - }, - "encrypted_key": "does-not-matter", - "aad": "does-not-matter", - "iv": "does-not-matter", - "ciphertext": "does-not-matter", - "tag": "does-not-matter" - }` - _, err := ParseEncrypted(input) - if err == nil { - t.Error("JWE with an unprotected nonce parsed as valid.") - } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" { - t.Errorf("Improper error for unprotected nonce: %v", err) - } - - input = `{ - "unprotected": { - "alg": "XYZ", "enc": "XYZ", - "nonce": "should-cause-an-error" - }, - "encrypted_key": "does-not-matter", - "aad": "does-not-matter", - "iv": "does-not-matter", - "ciphertext": "does-not-matter", - "tag": "does-not-matter" - }` - _, err = ParseEncrypted(input) - if err == nil { - t.Error("JWE with an unprotected nonce parsed as valid.") - } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" { - t.Errorf("Improper error for unprotected nonce: %v", err) - } - - // Full JSON - input = `{ - "header": { "alg": "XYZ", "enc": "XYZ" }, - "aad": "does-not-matter", - "iv": "does-not-matter", - "ciphertext": "does-not-matter", - "tag": "does-not-matter", - "recipients": [{ - "header": { "nonce": "should-cause-an-error" }, - "encrypted_key": "does-not-matter" - }] - }` - _, err = ParseEncrypted(input) - if err == nil { - t.Error("JWS with an unprotected nonce parsed as valid.") - } else if err.Error() != "square/go-jose: Nonce parameter included in unprotected header" { - t.Errorf("Improper error for unprotected nonce: %v", err) - } -} - -func TestCompactSerialize(t *testing.T) { - // Compact serialization must fail if we have unprotected headers - obj := &JsonWebEncryption{ - unprotected: &rawHeader{Alg: "XYZ"}, - } - - _, err := obj.CompactSerialize() - if err == nil { - t.Error("Object with unprotected headers can't be compact serialized") - } -} - -func TestVectorsJWE(t *testing.T) { - plaintext := []byte("The true sign of intelligence is not knowledge but imagination.") - - publicKey := &rsa.PublicKey{ - N: fromBase64Int(` - oahUIoWw0K0usKNuOR6H4wkf4oBUXHTxRvgb48E-BVvxkeDNjbC4he8rUW - 
cJoZmds2h7M70imEVhRU5djINXtqllXI4DFqcI1DgjT9LewND8MW2Krf3S - psk_ZkoFnilakGygTwpZ3uesH-PFABNIUYpOiN15dsQRkgr0vEhxN92i2a - sbOenSZeyaxziK72UwxrrKoExv6kc5twXTq4h-QChLOln0_mtUZwfsRaMS - tPs6mS6XrgxnxbWhojf663tuEQueGC-FCMfra36C9knDFGzKsNa7LZK2dj - YgyD3JR_MB_4NUJW_TqOQtwHYbxevoJArm-L5StowjzGy-_bq6Gw`), - E: 65537, - } - - expectedCompact := stripWhitespace(` - eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.ROQCfge4JPm_ - yACxv1C1NSXmwNbL6kvmCuyxBRGpW57DvlwByjyjsb6g8m7wtLMqKEyhFCn - tV7sjippEePIlKln6BvVnz5ZLXHNYQgmubuNq8MC0KTwcaGJ_C0z_T8j4PZ - a1nfpbhSe-ePYaALrf_nIsSRKu7cWsrwOSlaRPecRnYeDd_ytAxEQWYEKFi - Pszc70fP9geZOB_09y9jq0vaOF0jGmpIAmgk71lCcUpSdrhNokTKo5y8MH8 - 3NcbIvmuZ51cjXQj1f0_AwM9RW3oCh2Hu0z0C5l4BujZVsDuGgMsGZsjUhS - RZsAQSXHCAmlJ2NlnN60U7y4SPJhKv5tKYw.48V1_ALb6US04U3b.5eym8T - W_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiS - diwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ`) - - expectedFull := stripWhitespace(` - { "protected":"eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ", - "encrypted_key": - "ROQCfge4JPm_yACxv1C1NSXmwNbL6kvmCuyxBRGpW57DvlwByjyjsb - 6g8m7wtLMqKEyhFCntV7sjippEePIlKln6BvVnz5ZLXHNYQgmubuNq - 8MC0KTwcaGJ_C0z_T8j4PZa1nfpbhSe-ePYaALrf_nIsSRKu7cWsrw - OSlaRPecRnYeDd_ytAxEQWYEKFiPszc70fP9geZOB_09y9jq0vaOF0 - jGmpIAmgk71lCcUpSdrhNokTKo5y8MH83NcbIvmuZ51cjXQj1f0_Aw - M9RW3oCh2Hu0z0C5l4BujZVsDuGgMsGZsjUhSRZsAQSXHCAmlJ2Nln - N60U7y4SPJhKv5tKYw", - "iv": "48V1_ALb6US04U3b", - "ciphertext": - "5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFS - hS8iB7j6jiSdiwkIr3ajwQzaBtQD_A", - "tag":"XFBoMYUZodetZdvTiFvSkQ" }`) - - // Mock random reader - randReader = bytes.NewReader([]byte{ - // Encryption key - 177, 161, 244, 128, 84, 143, 225, 115, 63, 180, 3, 255, 107, 154, - 212, 246, 138, 7, 110, 91, 112, 46, 34, 105, 47, 130, 203, 46, 122, - 234, 64, 252, - // Randomness for RSA-OAEP - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - // Initialization vector - 227, 197, 117, 252, 2, 219, 233, 68, 180, 225, 77, 219}) - defer resetRandReader() - - // Encrypt with a dummy key - encrypter, err := NewEncrypter(RSA_OAEP, A256GCM, publicKey) - if err != nil { - panic(err) - } - - object, err := encrypter.Encrypt(plaintext) - if err != nil { - panic(err) - } - - serialized, err := object.CompactSerialize() - if serialized != expectedCompact { - t.Error("Compact serialization is not what we expected", serialized, expectedCompact) - } - - serialized = object.FullSerialize() - if serialized != expectedFull { - t.Error("Full serialization is not what we expected") - } -} - -func TestVectorsJWECorrupt(t *testing.T) { - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: fromHexInt(` - a8b3b284af8eb50b387034a860f146c4919f318763cd6c5598c8 - ae4811a1e0abc4c7e0b082d693a5e7fced675cf4668512772c0c - bc64a742c6c630f533c8cc72f62ae833c40bf25842e984bb78bd - bf97c0107d55bdb662f5c4e0fab9845cb5148ef7392dd3aaff93 - ae1e6b667bb3d4247616d4f5ba10d4cfd226de88d39f16fb`), - E: 65537, - }, - D: fromHexInt(` - 53339cfdb79fc8466a655c7316aca85c55fd8f6dd898fdaf1195 - 17ef4f52e8fd8e258df93fee180fa0e4ab29693cd83b152a553d - 4ac4d1812b8b9fa5af0e7f55fe7304df41570926f3311f15c4d6 - 5a732c483116ee3d3d2d0af3549ad9bf7cbfb78ad884f84d5beb - 04724dc7369b31def37d0cf539e9cfcdd3de653729ead5d1`), - Primes: []*big.Int{ - fromHexInt(` - d32737e7267ffe1341b2d5c0d150a81b586fb3132bed2f8d5262 - 864a9cb9f30af38be448598d413a172efb802c21acf1c11c520c - 2f26a471dcad212eac7ca39d`), - fromHexInt(` - cc8853d1d54da630fac004f471f281c7b8982d8224a490edbeb3 - 3d3e3d5cc93c4765703d1dd791642f1f116a0dd852be2419b2af - 
72bfe9a030e860b0288b5d77`), - }, - } - - corruptCiphertext := stripWhitespace(` - eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.NFl09dehy - IR2Oh5iSsvEa82Ps7DLjRHeo0RnuTuSR45OsaIP6U8yu7vLlWaZKSZMy - B2qRBSujf-5XIRoNhtyIyjk81eJRXGa_Bxaor1XBCMyyhGchW2H2P71f - PhDO6ufSC7kV4bNqgHR-4ziS7KXwzN83_5kogXqxUpymUoJDNc.tk-GT - W_VVhiTIKFF.D_BE6ImZUl9F.52a-zFnRb3YQwIC7UrhVyQ`) - - corruptAuthtag := stripWhitespace(` - eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.NFl09dehy - IR2Oh5iSsvEa82Ps7DLjRHeo0RnuTuSR45OsaIP6U8yu7vLlWaZKSZMy - B2qRBSujf-5XIRoNhtyIyjk81eJRXGa_Bxaor1XBCMyyhGchW2H2P71f - PhDO6ufSC7kV4bNqgHR-4ziS7KNwzN83_5kogXqxUpymUoJDNc.tk-GT - W_VVhiTIKFF.D_BE6ImZUl9F.52a-zFnRb3YQwiC7UrhVyQ`) - - msg, _ := ParseEncrypted(corruptCiphertext) - _, err := msg.Decrypt(priv) - if err != ErrCryptoFailure { - t.Error("should detect corrupt ciphertext") - } - - msg, _ = ParseEncrypted(corruptAuthtag) - _, err = msg.Decrypt(priv) - if err != ErrCryptoFailure { - t.Error("should detect corrupt auth tag") - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestSampleNimbusJWEMessagesRSA(t *testing.T) { - rsaPrivateKey, err := LoadPrivateKey(fromBase64Bytes(` - MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCNRCEmf5PlbXKuT4uwnb - wGKvFrtpi+bDYxOZxxqxdVkZM/bYATAnD1fg9pNvLMKeF+MWJ9kPIMmDgOh9RdnRdLvQGb - BzhLmxwhhcua2QYiHEZizXmiaXvNP12bzEBhebdX7ObW8izMVW0p0lqHPNzkK3K75B0Sxo - FMVKkZ7KtBHgepBT5yPhPPcNe5lXQeTne5bo3I60DRcN9jTBgMJOXdq0I9o4y6ZmoXdNTm - 0EyLzn9/EYiHqBxtKFh791EHR7wYgyi/t+nOKr4sO74NbEByP0mHDil+mPvZSzFW4l7fPx - OclRZvpRIKIub2TroZA9s2WsshGf79eqqXYbBB9NNRAgMBAAECggEAIExbZ/nzTplfhwsY - 3SCzRJW87OuqsJ79JPQPGM4NX7sQ94eJqM7+FKLl0yCFErjgnYGdCyiArvB+oJPdsimgke - h83X0hGeg03lVA3/6OsG3WifCAxulnLN44AM8KST8S9D9t5+cm5vEBLHazzAfWWTS13s+g - 9hH8rf8NSqgZ36EutjKlvLdHx1mWcKX7SREFVHT8FWPAbdhTLEHUjoWHrfSektnczaSHnt - q8fFJy6Ld13QkF1ZJRUhtA24XrD+qLTc+M36IuedjeZaLHFB+KyhYR3YvXEtrbCug7dCRd - uG6uTlDCSaSy7xHeTPolWtWo9F202jal54otxiAJFGUHgQKBgQDRAT0s6YQZUfwE0wluXV - k0JdhDdCo8sC1aMmKlRKWUkBAqrDl7BI3MF56VOr4ybr90buuscshFf9TtrtBOjHSGcfDI - tSKfhhkW5ewQKB0YqyHzoD6UKT0/XAshFY3esc3uCxuJ/6vOiXV0og9o7eFvr51O0TfDFh - mcTvW4wirKlQKBgQCtB7UAu8I9Nn8czkd6oXLDRyTWYviuiqFmxR+PM9klgZtsumkeSxO1 - lkfFoj9+G8nFaqYEBA9sPeNtJVTSROCvj/iQtoqpV2NiI/wWeVszpBwsswx2mlks4LJa8a - Yz9xrsfNoroKYVppefc/MCoSx4M+99RSm3FSpLGZQHAUGyzQKBgQDMQmq4JuuMF1y2lk0E - SESyuz21BqV0tDVOjilsHT+5hmXWXoS6nkO6L2czrrpM7YE82F6JJZBmo7zEIXHBInGLJ3 - XLoYLZ5qNEhqYDUEDHaBCBWZ1vDTKnZlwWFEuXVavNNZvPbUhKTHq25t8qjDki/r09Vykp - BsM2yNBKpbBOVQKBgCJyUVd3CaFUExQyAMrqD0XPCQdhJq7gzGcAQVsp8EXmOoH3zmuIeM - ECzQEMXuWFNLMHm0tbX5Kl83vMHcnKioyI9ewhWxOBYTitf0ceG8j5F97SOl32NmCXzwoJ - 55Oa0xJXfLuIvOe8hZzp4WwZmBfKBxiCR166aPQQgIawelrVAoGAEJsHomfCI4epxH4oMw - qYJMCGy95zloB+2+c86BZCOJAGwnfzbtc2eutWZw61/9sSO8sQCfzA8oX+5HwAgnFVzwW4 - lNMZohppYcpwN9EyjkPaCXuALC7p5rF2o63wY7JLvnjS2aYZliknh2yW6X6fSB0PK0Cpvd - lAIyRw6Kud0zI=`)) - if err != nil { - panic(err) - } - - rsaSampleMessages := []string{ - "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBMV81In0.EW0KOhHeoAxTBnLjYhh2T6HjwI-srNs6RpcSdZvE-GJ5iww3EYWBCmeGGj1UVz6OcBfwW3wllZ6GPOHU-hxVQH5KYpVOjkmrFIYU6-8BHhxBP_PjSJEBCZzjOgsCm9Th4-zmlO7UWTdK_UtwE7nk4X-kkmEy-aZBCShA8nFe2MVvqD5F7nvEWNFBOHh8ae_juo-kvycoIzvxLV9g1B0Zn8K9FAlu8YF1KiL5NFekn76f3jvAwlExuRbFPUx4gJN6CeBDK_D57ABsY2aBVDSiQceuYZxvCIAajqSS6dMT382FNJzAiQhToOpo_1w5FnnBjzJLLEKDk_I-Eo2YCWxxsQ.5mCMuxJqLRuPXGAr.Ghe4INeBhP3MDWGvyNko7qanKdZIzKjfeiU.ja3UlVWJXKNFJ-rZsJWycw", - 
"eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBMV81In0.JsJeYoP0St1bRYNUaAmA34DAA27usE7RNuC2grGikBRmh1xrwUOpnEIXXpwr7fjVmNi52zzWkNHC8JkkRTrLcCh2VXvnOnarpH8DCr9qM6440bSrahzbxIvDds8z8q0wT1W4kjVnq1mGwGxg8RQNBWTV6Sp2FLQkZyjzt_aXsgYzr3zEmLZxB-d41lBS81Mguk_hdFJIg_WO4ao54lozvxkCn_uMiIZ8eLb8qHy0h-N21tiHGCaiC2vV8KXomwoqbJ0SXrEH4r9_R2J844H80TBZdbvNBd8whvoQNHvOX659LNs9EQ9xxvHU2kqGZekXBu7sDXXTjctMkMITobGSzw.1v5govaDvanP3LGp.llwYNBDrD7MwVLaFHesljlratfmndWs4XPQ.ZGT1zk9_yIKi2GzW6CuAyA", - "eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBMV81In0.fBv3fA3TMS3ML8vlsCuvwdsKvB0ym8R30jJrlOiqkWKk7WVUkjDInFzr1zw3Owla6c5BqOJNoACXt4IWbkLbkoWV3tweXlWwpafuaWPkjLOUH_K31rS2fCX5x-MTj8_hScquVQXpbz3vk2EfulRmGXZc_8JU2NqQCAsYy3a28houqP3rDe5jEAvZS2SOFvJkKW--f5S-z39t1D7fNz1N8Btd9SmXWQzjbul5YNxI9ctqxhJpkKYpxOLlvrzdA6YdJjOlDx3n6S-HnSZGM6kQd_xKtAf8l1EGwhQmhbXhMhjVxMvGwE5BX7PAb8Ccde5bzOCJx-PVbVetuLb169ZYqQ._jiZbOPRR82FEWMZ.88j68LI-K2KT6FMBEdlz6amG5nvaJU8a-90.EnEbUTJsWNqJYKzfO0x4Yw", - "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBMV81In0.bN6FN0qmGxhkESiVukrCaDVG3woL0xE-0bHN_Mu0WZXTQWbzzT-7jOvaN1xhGK8nzi8qpCSRgE5onONNB9i8OnJm3MMIxF7bUUEAXO9SUAFn2v--wNc4drPc5OjIu0RiJrDVDkkGjNrBDIuBaEQcke7A0v91PH58dXE7o4TLPzC8UJmRtXWhUSwjXVF3-UmYRMht2rjHJlvRbtm6Tu2LMBIopRL0zj6tlPP4Dm7I7sz9OEB3VahYAhpXnFR7D_f8RjLSXQmBvB1FiI5l_vMz2NFt2hYUmQF3EJMLIEdHvvPp3iHDGiXC1obJrDID_CCf3qs9UY7DMYL622KLvP2NIg.qb72oxECzxd_aNuHVR0aNg.Gwet9Ms8hB8rKEb0h4RGdFNRq97Qs2LQaJM0HWrCqoI.03ljVThOFvgXzMmQJ79VjQ", - "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBMV81In0.ZbEOP6rqdiIP4g7Nl1PL5gwhgDwv9RinyiUQxZXPOmD7kwEZrZ093dJnhqI9kEd3QGFlHDpB7HgNz53d27z2zmEj1-27v6miizq6tH4sN2MoeZLwSyk16O1_n3bVdDmROawsTYYFJfHsuLwyVJxPd37duIYnbUCFO9J8lLIv-2VI50KJ1t47YfE4P-Wt9jVzxP2CVUQaJwTlcwfiDLJTagYmfyrDjf525WlQFlgfJGqsJKp8BX9gmKvAo-1iCBAM8VpEjS0u0_hW9VSye36yh8BthVV-VJkhJ-0tMpto3bbBmj7M25Xf4gbTrrVU7Nz6wb18YZuhHZWmj2Y2nHV6Jg.AjnS44blTrIIfFlqVw0_Mg.muCRgaEXNKKpW8rMfW7jf7Zpn3VwSYDz-JTRg16jZxY.qjc9OGlMaaWKDWQSIwVpR4K556Pp6SF9", - "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBMV81In0.c7_F1lMlRHQQE3WbKmtHBYTosdZrG9hPfs-F9gNQYet61zKG8NXVkSy0Zf2UFHt0vhcO8hP2qrqOFsy7vmRj20xnGHQ2EE29HH6hwX5bx1Jj3uE5WT9Gvh0OewpvF9VubbwWTIObBpdEG7XdJsMAQlIxtXUmQYAtLTWcy2ZJipyJtVlWQLaPuE8BKfZH-XAsp2CpQNiRPI8Ftza3EAspiyRfVQbjKt7nF8nuZ2sESjt7Y50q4CSiiCuGT28T3diMN0_rWrH-I-xx7OQvJlrQaNGglGtu3jKUcrJDcvxW2e1OxriaTeuQ848ayuRvGUNeSv6WoVYmkiK1x_gNwUAAbw.7XtSqHJA7kjt6JrfxJMwiA.Yvi4qukAbdT-k-Fd2s4G8xzL4VFxaFC0ZIzgFDAI6n0.JSWPJ-HjOE3SK9Lm0yHclmjS7Z1ahtQga9FHGCWVRcc", - "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.SYVxJbCnJ_tcR13LJpaqHQj-nGNkMxre4A1FmnUdxnvzeJwuvyrLiUdRsZR1IkP4fqLtDON2mumx39QeJQf0WIObPBYlIxycRLkwxDHRVlyTmPvdZHAxN26jPrk09wa5SgK1UF1W1VSQIPm-Tek8jNAmarF1Yxzxl-t54wZFlQiHP4TuaczugO5f-J4nlWenfla2mU1snDgdUMlEZGOAQ_gTEtwSgd1MqXmK_7LZBkoDqqoCujMZhziafJPXPDaUUqBLW3hHkkDA7GpVec3XcTtNUWQJqOpMyQhqo1KQMc8jg3fuirILp-hjvvNVtBnCRBvbrKUCPzu2_yH3HM_agA.2VsdijtonAxShNIW.QzzB3P9CxYP3foNKN0Ma1Z9tMwijAlkWo08.ZdQkIPDY_M-hxqi5fD4NGw", - "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.Z2oTJXXib1u-S38Vn3DRKE3JnhnwgUa92UhsefzY2Wpdn0dmxMfYt9iRoJGFfSAcA97MOfjyvXVRCKWXGrG5AZCMAXEqU8SNQwKPRjlcqojcVzQyMucXI0ikLC4mUgeRlfKTwsBicq6JZZylzRoLGGSNJQbni3_BLsf7H3Qor0BYg0FPCLG9Z2OVvrFzvjTLmZtV6gFlVrMHBxJub_aUet9gAkxiu1Wx_Kx46TlLX2tkumXIpTGlzX6pef6jLeZ5EIg_K-Uz4tkWgWQIEkLD7qmTyk5pAGmzukHa_08jIh5-U-Sd8XGZdx4J1pVPJ5CPg0qDJGZ_cfgkgpWbP_wB6A.4qgKfokK1EwYxz20._Md82bv_KH2Vru0Ue2Eb6oAqHP2xBBP5jF8.WFRojvQpD5VmZlOr_dN0rQ", - 
"eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBLU9BRVAifQ.JzCUgJcBJmBgByp4PBAABUfhezPvndxBIVzaoZ96DAS0HPni0OjMbsOGsz6JwNsiTr1gSn_S6R1WpZM8GJc9R2z0EKKVP67TR62ZSG0MEWyLpHmG_4ug0fAp1HWWMa9bT4ApSaOLgwlpVAb_-BPZZgIu6c8cREuMon6UBHDqW1euTBbzk8zix3-FTZ6p5b_3soDL1wXfRiRBEsxxUGMnpryx1OFb8Od0JdyGF0GgfLt6OoaujDJpo-XtLRawu1Xlg6GqRs0NQwSHZ5jXgQ6-zgCufXonAmYTiIyBXY2no9XmECTexjwrS_05nA7H-UyIZEBOCp3Yhz2zxrt5j_0pvQ.SJR-ghhaUKP4zXtZ.muiuzLfZA0y0BDNsroGTw2r2-l73SLf9lK8.XFMH1oHr1G6ByP3dWSUUPA", - "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBLU9BRVAifQ.U946MVfIm4Dpk_86HrnIA-QXyiUu0LZ67PL93CMLmEtJemMNDqmRd9fXyenCIhAC7jPIV1aaqW7gS194xyrrnUpBoJBdbegiPqOfquy493Iq_GQ8OXnFxFibPNQ6rU0l8BwIfh28ei_VIF2jqN6bhxFURCVW7fG6n6zkCCuEyc7IcxWafSHjH2FNttREuVj-jS-4LYDZsFzSKbpqoYF6mHt8H3btNEZDTSmy_6v0fV1foNtUKNfWopCp-iE4hNh4EzJfDuU8eXLhDb03aoOockrUiUCh-E0tQx9su4rOv-mDEOHHAQK7swm5etxoa7__9PC3Hg97_p4GM9gC9ykNgw.pnXwvoSPi0kMQP54of-HGg.RPJt1CMWs1nyotx1fOIfZ8760mYQ69HlyDp3XmdVsZ8.Yxw2iPVWaBROFE_FGbvodA", - "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBLU9BRVAifQ.eKEOIJUJpXmO_ghH_nGCJmoEspqKyiy3D5l0P8lKutlo8AuYHPQlgOsaFYnDkypyUVWd9zi-JaQuCeo7dzoBiS1L71nAZo-SUoN0anQBkVuyuRjr-deJMhPPfq1H86tTk-4rKzPr1Ivd2RGXMtWsrUpNGk81r1v8DdMntLE7UxZQqT34ONuZg1IXnD_U6di7k07unI29zuU1ySeUr6w1YPw5aUDErMlpZcEJWrgOEYWaS2nuC8sWGlPGYEjqkACMFGn-y40UoS_JatNZO6gHK3SKZnXD7vN5NAaMo_mFNbh50e1t_zO8DaUdLtXPOBLcx_ULoteNd9H8HyDGWqwAPw.0xmtzJfeVMoIT1Cp68QrXA.841l1aA4c3uvSYfw6l180gn5JZQjL53WQ5fr8ejtvoI.lojzeWql_3gDq-AoaIbl_aGQRH_54w_f", - "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBLU9BRVAifQ.D0QkvIXR1TL7dIHWuPNMybmmD8UPyQd1bRKjRDNbA2HmKGpamCtcJmpNB_EetNFe-LDmhe44BYI_XN2wIBbYURKgDK_WG9BH0LQw_nCVqQ-sKqjtj3yQeytXhLHYTDmiF0TO-uW-RFR7GbPAdARBfuf4zj82r_wDD9sD5WSCGx89iPfozDOYQ_OLwdL2WD99VvDyfwS3ZhxA-9IMSYv5pwqPkxj4C0JdjCqrN0YNrZn_1ORgjtsVmcWXsmusObTozUGA7n5GeVepfZdU1vrMulAwdRYqOYtlqKaOpFowe9xFN3ncBG7wb4f9pmzbS_Dgt-1_Ii_4SEB9GQ4NiuBZ0w.N4AZeCxMGUv52A0UVJsaZw.5eHOGbZdtahnp3l_PDY-YojYib4ft4SRmdsQ2kggrTs.WsmGH8ZDv4ctBFs7qsQvw2obe4dVToRcAQaZ3PYL34E", - "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.fDTxO_ZzZ3Jdrdw-bxvg7u-xWB2q1tp3kI5zH6JfhLUm4h6rt9qDA_wZlRym8-GzEtkUjkTtQGs6HgQx_qlyy8ylCakY5GHsNhCG4m0UNhRiNfcasAs03JSXfON9-tfTJimWD9n4k5OHHhvcrsCW1G3jYeLsK9WHCGRIhNz5ULbo8HBrCTbmZ6bOEQ9mqhdssLpdV24HDpebotf3bgPJqoaTfWU6Uy7tLmPiNuuNRLQ-iTpLyNMTVvGqqZhpcV3lAEN5l77QabI5xLJYucvYjrXQhAEZ7YXO8oRYhGkdG2XXIRcwr87rBeRH-47HAyhZgF_PBPBhhrJNS9UNMqdfBw.FvU4_s7Md6vxnXWd.fw29Q4_gHt4f026DPPV-CNebQ8plJ6IVLX8._apBZrw7WsT8HOmxgCrTwA", - "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.bYuorK-rHMbO4c2CRWtvyOEaM1EN-o-wLRZ0wFWRX9mCXQ-iTNarZn7ksYM1XnGmZ4u3CSowX1Hpca9Rg72_VJCmKapqCT7r3YfasN4_oeLwuSKI_gT-uVOznod97tn3Gf_EDv0y1V4H0k9BEIFGbajAcG1znTD_ODY3j2KZJxisfrsBoslc6N-HI0kKZMC2hSGuHOcOf8HN1sTE-BLqZCtoj-zxQECJK8Wh14Ih4jzzdmmiu_qmSR780K6su-4PRt3j8uY7oCiLBfwpCsCmhJgp8rKd91zoedZmamfvX38mJIfE52j4fG6HmIYw9Ov814fk9OffV6tzixjcg54Q2g.yeVJz4aSh2s-GUr9.TBzzWP5llEiDdugpP2SmPf2U4MEGG9EoPWk.g25UoWpsBaOd45J__FX7mA", - "eyJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.h9tFtmh762JuffBxlSQbJujCyI4Zs9yc3IOb1yR8g65W4ZHosIvzVGHWbShj4EY9MNrz-RbKtHfqQGGzDeo3Xb4-HcQ2ZDHyWoUg7VfA8JafJ5zIKL1npz8eUExOVMLsAaRfHg8qNfczodg3egoSmX5Q-nrx4DeidDSXYZaZjV0C72stLTPcuQ7XPV7z1tvERAkqpvcsRmJn_PiRNxIbAgoyHMJ4Gijuzt1bWZwezlxYmw0TEuwCTVC2fl9NJTZyxOntS1Lcm-WQGlPkVYeVgYTOQXLlp7tF9t-aAvYpth2oWGT6Y-hbPrjx_19WaKD0XyWCR46V32DlXEVDP3Xl2A.NUgfnzQyEaJjzt9r.k2To43B2YVWMeR-w3n4Pr2b5wYq2o87giHk.X8_QYCg0IGnn1pJqe8p_KA", - 
"eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.EDq6cNP6Yp1sds5HZ4CkXYp7bs9plIYVZScKvuyxUy0H1VyBC_YWg0HvndPNb-vwh1LA6KMxRazlOwJ9iPR9YzHnYmGgPM3Je_ZzBfiPlRfq6hQBpGnNaypBI1XZ2tyFBhulsVLqyJe2SmM2Ud00kasOdMYgcN8FNFzq7IOE7E0FUQkIwLdUL1nrzepiYDp-5bGkxWRcL02cYfdqdm00G4m0GkUxAmdxa3oPNxZlt2NeBI_UVWQSgJE-DJVJQkDcyA0id27TV2RCDnmujYauNT_wYlyb0bFDx3pYzzNXfAXd4wHZxt75QaLZ5APJ0EVfiXJ0qki6kT-GRVmOimUbQA.vTULZL7LvS0WD8kR8ZUtLg.mb2f0StEmmkuuvsyz8UplMvF58FtZzlu8eEwzvPUvN0.hbhveEN40V-pgG2hSVgyKg", - "eyJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.DuYk92p7u-YIN-JKn-XThmlVcnhU9x5TieQ2uhsLQVNlo0iWC9JJPP6bT6aI6u_1BIS3yE8_tSGGL7eM-zyEk6LuTqSWFRaZcZC06d0MnS9eYZcw1T2D17fL-ki-NtCaTahJD7jE2s0HevRVW49YtL-_V8whnO_EyVjvXIAQlPYqhH_o-0Nzcpng9ggdAnuF2rY1_6iRPYFJ3BLQvG1oWhyJ9s6SBttlOa0i6mmFCVLHx6sRpdGAB3lbCL3wfmHq4tpIv77gfoYUNP0SNff-zNmBXF_wp3dCntLZFTjbfMpGyHlruF_uoaLqwdjYpUGNUFVUoeSiMnSbMKm9NxiDgQ.6Mdgcqz7bMU1UeoAwFC8pg.W36QWOlBaJezakUX5FMZzbAgeAu_R14AYKZCQmuhguw.5OeyIJ03olxmJft8uBmjuOFQPWNZMYLI", - "eyJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.ECulJArWFsPL2FlpCN0W8E7IseSjJg1cZqE3wz5jk9gvwgNForAUEv5KYZqhNI-p5IxkGV0f8K6Y2X8pWzbLwiPIjZe8_dVqHYJoINxqCSgWLBhz0V36qL9Nc_xARTBk4-ZteIu75NoXVeos9gNvFnkOCj4tm-jGo8z8EFO9XfODgjhiR4xv8VqUtvrkjo9GQConaga5zpV-J4JQlXbdqbDjnuwacnJAxYpFyuemqcgqsl6BnFX3tovGkmSUPqcvF1A6tiHqr-TEmcgVqo5C3xswknRBKTQRM00iAmJ92WlVdkoOCx6E6O7cVHFawZ14BLzWzm66Crb4tv0ucYvk_Q.mxolwUaoj5S5kHCfph0w8g.nFpgYdnYg3blHCCEi2XXQGkkKQBXs2OkZaH11m3PRvk.k8BAVT4EcyrUFVIKr-KOSPbF89xyL0Vri2rFTu2iIWM", - } - - for _, msg := range rsaSampleMessages { - obj, err := ParseEncrypted(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - plaintext, err := obj.Decrypt(rsaPrivateKey) - if err != nil { - t.Error("unable to decrypt message", msg, err) - continue - } - if string(plaintext) != "Lorem ipsum dolor sit amet" { - t.Error("plaintext is not what we expected for msg", msg) - } - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestSampleNimbusJWEMessagesAESKW(t *testing.T) { - aesTestKeys := [][]byte{ - fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D"), - fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D95EC9CDC2D82233C"), - fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D95EC9CDC2D82233C333C35BA29044E90"), - } - - aesSampleMessages := [][]string{ - []string{ - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoib2ZMd2Q5NGloVWFRckJ0T1pQUDdjUSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiV2Z3TnN5cjEwWUFjY2p2diJ9.9x3RxdqIS6P9xjh93Eu1bQ.6fs3_fSGt2jull_5.YDlzr6sWACkFg_GU5MEc-ZEWxNLwI_JMKe_jFA.f-pq-V7rlSSg_q2e1gDygw", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoic2RneXB1ckFjTEFzTmZJU0lkZUNpUSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoieVFMR0dCdDJFZ0c1THdyViJ9.arslKo4aKlh6f4s0z1_-U-8JbmhAoZHN.Xw2Q-GX98YXwuc4i.halTEWMWAYZbv-qOD52G6bte4x6sxlh1_VpGEA.Z1spn016v58cW6Q2o0Qxag", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoicTNzejF5VUlhbVBDYXJfZ05kSVJqQSIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiM0ZRM0FsLWJWdWhmcEIyQyJ9.dhVipWbzIdsINttuZM4hnjpHvwEHf0VsVrOp4GAg01g.dk7dUyt1Qj13Pipw.5Tt70ONATF0BZAS8dBkYmCV7AQUrfb8qmKNLmw.A6ton9MQjZg0b3C0QcW-hg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiUHNpTGphZnJZNE16UlRmNlBPLTZfdyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiSUFPbnd2ODR5YXFEaUxtbSJ9.swf92_LyCvjsvkynHTuMNXRl_MX2keU-fMDWIMezHG4.LOp9SVIXzs4yTnOtMyXZYQ.HUlXrzqJ1qXYl3vUA-ydezCg77WvJNtKdmZ3FPABoZw.8UYl1LOofQLAxHHvWqoTbg", - 
"eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiWGRndHQ5dUVEMVlVeU1rVHl6M3lqZyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiWF90V2RhSmh6X3J1SHJvQSJ9.JQ3dS1JSgzIFi5M9ig63FoFU1nHBTmPwXY_ovNE2m1JOSUvHtalmihIuraPDloCf.e920JVryUIWt7zJJQM-www.8DUrl4LmsxIEhRr9RLTHG9tBTOcwXqEbQHAJd_qMHzE.wHinoqGUhL4O7lx125kponpwNtlp8VGJ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoicGgyaTdoY0FWNlh3ZkQta1RHYlVXdyIsImFsZyI6IkExMjhHQ01LVyIsIml2IjoiaG41Smk4Wm1rUmRrSUxWVSJ9._bQlJXl22dhsBgYPhkxUyinBNi871teGWbviOueWj2PqG9OPxIc9SDS8a27YLSVDMircd5Q1Df28--vcXIABQA.DssmhrAg6w_f2VDaPpxTbQ.OGclEmqrxwvZqAfn7EgXlIfXgr0wiGvEbZz3zADnqJs.YZeP0uKVEiDl8VyC-s20YN-RbdyGNsbdtoGDP3eMof8", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTEyOEtXIn0.TEMcXEoY8WyqGjYs5GZgS-M_Niwu6wDY.i-26KtTt51Td6Iwd.wvhkagvPsLj3QxhPBbfH_th8OqxisUtme2UadQ.vlfvBPv3bw2Zk2H60JVNLQ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTEyOEtXIn0.gPaR6mgQ9TUx05V6DRfgTQeZxl0ZSzBa5uQd-qw6yLs.MojplOD77FkMooS-.2yuD7dKR_C3sFbhgwiBccKKOF8DrSvNiwX7wPQ.qDKUbSvMnJv0qifjpWC14g", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTEyOEtXIn0.Fg-dgSkUW1KEaL5YDPoWHNL8fpX1WxWVLA9OOWsjIFhQVDKyUZI7BQ.mjRBpyJTZf7H-quf.YlNHezMadtaSKp23G-ozmYhHOeHwuJnvWGTtGg.YagnR7awBItUlMDo4uklvg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTEyOEtXIn0.x1vYzUE-E2XBWva9OPuwtqfQaf9rlJCIBAyAe6N2q2kWfJrkxGxFsQ.gAwe78dyODFaoP2IOityAA.Yh5YfovkWxGBNAs1sVhvXow_2izHHsBiYEc9JYD6kVg.mio1p3ncp2wLEaEaRa7P0w", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTEyOEtXIn0.szGrdnmF7D5put2aRBvSSFfp0vRgkRGYaafijJIqAF6PWd1IxsysZRV8aQkQOW1cB6d0fXsTfYM.Ru25LVOOk4xhaK-cIZ0ThA.pF9Ok5zot7elVqXFW5YYHV8MuF9gVGzpQnG1XDs_g_w.-7la0uwcNPpteev185pMHZjbVDXlrec8", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTEyOEtXIn0.cz-hRv0xR5CnOcnoRWNK8Q9poyVYzRCVTjfmEXQN6xPOZUkJ3zKNqb8Pir_FS0o2TVvxmIbuxeISeATTR2Ttx_YGCNgMkc93.SF5rEQT94lZR-UORcMKqGw.xphygoU7zE0ZggOczXCi_ytt-Evln8CL-7WLDlWcUHg.5h99r8xCCwP2PgDbZqzCJ13oFfB2vZWetD5qZjmmVho", - }, - []string{ - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoiVWR5WUVKdEJ5ZTA5dzdjclY0cXI1QSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiZlBBV0QwUmdSbHlFdktQcCJ9.P1uTfTuH-imL-NJJMpuTRA.22yqZ1NIfx3KNPgc.hORWZaTSgni1FS-JT90vJly-cU37qTn-tWSqTg.gMN0ufXF92rSXupTtBNkhA", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoiOU9qX3B2LTJSNW5lZl9YbWVkUWltUSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiY3BybGEwYUYzREVQNmFJTSJ9.6NVpAm_APiC7km2v-oNR8g23K9U_kf1-.jIg-p8tNwSvwxch0.1i-GPaxS4qR6Gy4tzeVtSdRFRSKQSMpmn-VhzA.qhFWPqtA6vVPl7OM3DThsA", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoiOVc3THg3MVhGQVJCb3NaLVZ5dXc4ZyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiZ1N4ZE5heFdBSVBRR0tHYiJ9.3YjPz6dVQwAtCekvtXiHZrooOUlmCsMSvyfwmGwdrOA.hA_C0IDJmGaRzsB0.W4l7OPqpFxiVOZTGfAlRktquyRTo4cEOk9KurQ.l4bGxOkO_ql_jlPo3Oz3TQ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiOHJYbWl2WXFWZjNfbHhhd2NUbHJoUSIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiVXBWeXprVTNKcjEwYXRqYyJ9.8qft-Q_xqUbo5j_aVrVNHchooeLttR4Kb6j01O8k98M.hXO-5IKBYCL9UdwBFVm0tg.EBM4lCZX_K6tfqYmfoDxVPHcf6cT--AegXTTjfSqsIw.Of8xUvEQSh3xgFT3uENnAg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiVnItSnVaX0tqV2hSWWMzdzFwZ3cwdyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoiRGg2R3dISVBVS3ljZGNZeCJ9.YSEDjCnGWr_n9H94AvLoRnwm6bdU9w6-Q67k-QQRVcKRd6673pgH9zEF9A9Dt6o1.gcmVN4kxqBuMq6c7GrK3UQ.vWzJb0He6OY1lhYYjYS7CLh55REAAq1O7yNN-ND4R5Q.OD0B6nwyFaDr_92ysDOtlVnJaeoIqhGw", - 
"eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoieEtad1BGYURpQ3NqUnBqZUprZHhmZyIsImFsZyI6IkExOTJHQ01LVyIsIml2IjoieTVHRFdteXdkb2R1SDJlYyJ9.AW0gbhWqlptOQ1y9aoNVwrTIIkBfrp33C2OWJsbrDRk6lhxg_IgFhMDTE37moReySGUtttC4CXQD_7etHmd3Hw.OvKXK-aRKlXHOpJQ9ZY_YQ.Ngv7WarDDvR2uBj_DavPAR3DYuIaygvSSdcHrc8-ZqM.MJ6ElitzFCKf_0h5fIJw8uOLC6ps7dKZPozF8juQmUY", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTE5MktXIn0.8qu63pppcSvp1vv37WrZ44qcCTg7dQMA.cDp-f8dJTrDEpZW4.H6OBJYs4UvFR_IZHLYQZxB6u9a0wOdAif2LNfQ.1dB-id0UIwRSlmwHx5BJCg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTE5MktXIn0._FdoKQvC8qUs7K0upriEihUwztK8gOwonXpOxdIwrfs.UO38ok8gDdpLVa1T.x1GvHdVCy4fxoQRg-OQK4Ez3jDOvu9gllLPeEA.3dLeZGIprh_nHizOTVi1xw", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTE5MktXIn0.uzCJskgSIK6VkjJIu-dQi18biqaY0INc_A1Ehx0oESafgtR99_n4IA.W2eKK8Y14WwTowI_.J2cJC7R6Bz6maR0s1UBMPyRi5BebNUAmof4pvw.-7w6htAlc4iUsOJ6I04rFg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTE5MktXIn0.gImQeQETp_6dfJypFDPLlv7c5pCzuq86U16gzrLiCXth6X9XfxJpvQ.YlC4MxjtLWrsyEvlFhvsqw.Vlpvmg9F3gkz4e1xG01Yl2RXx-jG99rF5UvCxOBXSLc.RZUrU_FoR5bG3M-j3GY0Dw", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTE5MktXIn0.T2EfQ6Tu2wJyRMgZzfvBYmQNCCfdMudMrg86ibEMVAOUKJPtR3WMPEb_Syy9p2VjrLKRlv7nebo.GPc8VbarPPRtzIRATB8NsA.ugPCqLvVLwh55bWlwjsFkmWzJ31z5z-wuih2oJqmG_U.m7FY3EjvV6mKosEYJ5cY7ezFoVQoJS8X", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTE5MktXIn0.OgLMhZ-2ZhslQyHfzOfyC-qmT6bNg9AdpP59B4jtyxWkQu3eW475WCdiAjojjeyBtVRGQ5vOomwaOIFejY_IekzH6I_taii3.U9x44MF6Wyz5TIwIzwhoxQ.vK7yvSF2beKdNxNY_7n4XdF7JluCGZoxdFJyTJVkSmI.bXRlI8KL-g7gpprQxGmXjVYjYghhWJq7mlCfWI8q2uA", - }, - []string{ - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwidGFnIjoiR3BjX3pfbjduZjJVZlEtWGdsaTBaQSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiUk40eUdhOVlvYlFhUmZ1TCJ9.Q4ukD6_hZpmASAVcqWJ9Wg.Zfhny_1WNdlp4fH-.3sekDCjkExQCcv28ZW4yrcFnz0vma3vgoenSXA.g8_Ird2Y0itTCDP61du-Yg", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwidGFnIjoiWC05UkNVWVh4U3NRelcwelVJS01VUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiY3JNMnJfa3RrdWpyQ1h5OSJ9.c0q2jCxxV4y1h9u_Xvn7FqUDnbkmNEG4.S_noOTZKuUo9z1l6.ez0RdA25vXMUGH96iXmj3DEVox0J7TasJMnzgg.RbuSPTte_NzTtEEokbc5Ig", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwidGFnIjoiWmwyaDFpUW11QWZWd2lJeVp5RHloZyIsImFsZyI6IkEyNTZHQ01LVyIsIml2Ijoib19xZmljb0N0NzNzRWo1QyJ9.NpJxRJ0aqcpekD6HU2u9e6_pL_11JXjWvjfeQnAKkZU.4c5qBcBBrMWi27Lf.NKwNIb4b6cRDJ1TwMKsPrjs7ADn6aNoBdQClVw.yNWmSSRBqQfIQObzj8zDqw", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwidGFnIjoiMXdwVEI3LWhjdzZUVXhCbVh2UzdhUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiOUdIVnZJaDZ0a09vX2pHUSJ9.MFgIhp9mzlq9hoPqqKVKHJ3HL79EBYtV4iNhD63yqiU.UzW5iq8ou21VpZYJgKEN8A.1gOEzA4uAPvHP76GMfs9uLloAV10mKaxiZVAeL7iQA0.i1X_2i0bCAz-soXF9bI_zw", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwidGFnIjoiNThocUtsSk15Y1BFUEFRUlNfSzlNUSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiUDh3aTBWMTluVnZqNXpkOSJ9.FXidOWHNFJODO74Thq3J2cC-Z2B8UZkn7SikeosU0bUK6Jx_lzzmUZ-Lafadpdpj.iLfcDbpuBKFiSfiBzUQc7Q.VZK-aD7BFspqfvbwa0wE2wwWxdomzk2IKMetFe8bI44.7wC6rJRGa4x48xbYMd6NH9VzK8uNn4Cb", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwidGFnIjoicGcwOEpUcXdzMXdEaXBaRUlpVExoQSIsImFsZyI6IkEyNTZHQ01LVyIsIml2IjoiSlpodk9CdU1RUDFFZTZTNSJ9.wqVgTPm6TcYCTkpbwmn9sW4mgJROH2A3dIdSXo5oKIQUIVbQsmy7KXH8UYO2RS9slMGtb869C8o0My67GKg9dQ.ogrRiLlqjB1S5j-7a05OwA.2Y_LyqhU4S_RXMsB74bxcBacd23J2Sp5Lblw-sOkaUY.XGMiYoU-f3GaEzSvG41vpJP2DMGbeDFoWmkUGLUjc4M", - 
"eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiQTI1NktXIn0.QiIZm9NYfahqYFIbiaoUhCCHjotHMkup.EsU0XLn4FjzzCILn.WuCoQkm9vzo95E7hxBtfYpt-Mooc_vmSTyzj6Q.NbeeYVy6gQPlmhoWDrZwaQ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiQTI1NktXIn0.1ol3j_Lt0Os3UMe2Gypj0o8b77k0FSmqD7kNRNoMa9U.vZ2HMTgN2dgUd42h.JvNcy8-c8sYzOC089VtFSg2BOQx3YF8CqSTuJw.t03LRioWWKN3d7SjinU6SQ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiQTI1NktXIn0.gbkk03l1gyrE9qGEMVtORiyyUqKsgzbqjLd8lw0RQ07WWn--TV4BgA.J8ThH4ac2UhSsMIP.g-W1piEGrdi3tNwQDJXpYm3fQjTf82mtVCrCOg.-vY05P4kiB9FgF2vwrSeXQ", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiQTI1NktXIn0.k86pQs7gmQIzuIWRFwesF32XY2xi1WbYxi7XUf_CYlOlehwGCTINHg.3NcC9VzfQgsECISKf4xy-g.v2amdo-rgeGsg-II_tvPukX9D-KAP27xxf2uQJ277Ws.E4LIE3fte3glAnPpnd8D9Q", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiYWxnIjoiQTI1NktXIn0.b8iN0Am3fCUvj7sBd7Z0lpfzBjh1MOgojV7J5rDfrcTU3b35RGYgEV1RdcrtUTBgUwITDjmU7jM.wsSDBFghDga_ERv36I2AOg.6uJsucCb2YReFOJGBdo4zidTIKLUmZBIXfm_M0AJpKk.YwdAfXI3HHcw2wLSnfCRtw4huZQtSKhz", - "eyJ6aXAiOiJERUYiLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiYWxnIjoiQTI1NktXIn0.akY9pHCbkHPh5VpXIrX0At41XnJIKBR9iMMkf301vKeJNAZYJTxWzeJhFd-DhQ47tMctc3YYkwZkQ5I_9fGYb_f0oBcw4esh.JNwuuHud78h6S99NO1oBQQ.0RwckPYATBgvw67upkAQ1AezETHc-gh3rryz19i5ryc.3XClRTScgzfMgLCHxHHoRF8mm9VVGXv_Ahtx65PskKQ", - }, - } - - for i, msgs := range aesSampleMessages { - for _, msg := range msgs { - obj, err := ParseEncrypted(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - plaintext, err := obj.Decrypt(aesTestKeys[i]) - if err != nil { - t.Error("unable to decrypt message", msg, err) - continue - } - if string(plaintext) != "Lorem ipsum dolor sit amet" { - t.Error("plaintext is not what we expected for msg", msg) - } - } - } -} - -// Test vectors generated with jose4j -func TestSampleJose4jJWEMessagesECDH(t *testing.T) { - ecTestKey := &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: fromBase64Int("weNJy2HscCSM6AEDTDg04biOvhFhyyWvOHQfeF_PxMQ"), - Y: fromBase64Int("e8lnCO-AlStT-NJVX-crhB7QRYhiix03illJOVAOyck"), - }, - D: fromBase64Int("VEmDZpDXXK8p8N0Cndsxs924q6nS1RXFASRl6BfUqdw"), - } - - ecSampleMessages := []string{ - "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTEyOENCQy1IUzI1NiIsImVwayI6eyJrdHkiOiJFQyIsIngiOiJTQzAtRnJHUkVvVkpKSmg1TGhORmZqZnFXMC1XSUFyd3RZMzJzQmFQVVh3IiwieSI6ImFQMWlPRENveU9laTVyS1l2VENMNlRMZFN5UEdUN0djMnFsRnBwNXdiWFEiLCJjcnYiOiJQLTI1NiJ9fQ..3mifklTnTTGuA_etSUBBCw.dj8KFM8OlrQ3rT35nHcHZ7A5p84VB2OZb054ghSjS-M.KOIgnJjz87LGqMtikXGxXw", - "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTE5MkNCQy1IUzM4NCIsImVwayI6eyJrdHkiOiJFQyIsIngiOiJUaHRGc0lRZ1E5MkZOYWFMbUFDQURLbE93dmNGVlRORHc4ampfWlJidUxjIiwieSI6IjJmRDZ3UXc3YmpYTm1nVThXMGpFbnl5ZUZkX3Y4ZmpDa3l1R29vTFhGM0EiLCJjcnYiOiJQLTI1NiJ9fQ..90zFayMkKc-fQC_19f6P3A.P1Y_7lMnfkUQOXW_en31lKZ3zAn1nEYn6fXLjmyVPrQ.hrgwy1cePVfhMWT0h-crKTXldglHZ-4g", - "eyJhbGciOiJFQ0RILUVTIiwiZW5jIjoiQTI1NkNCQy1IUzUxMiIsImVwayI6eyJrdHkiOiJFQyIsIngiOiI5R1Z6c3VKNWgySl96UURVUFR3WU5zUkFzVzZfY2RzN0pELVQ2RDREQ1ZVIiwieSI6InFZVGl1dVU4aTB1WFpoaS14VGlRNlZJQm5vanFoWENPVnpmWm1pR2lRTEUiLCJjcnYiOiJQLTI1NiJ9fQ..v2reRlDkIsw3eWEsTCc1NA.0qakrFdbhtBCTSl7EREf9sxgHBP9I-Xw29OTJYnrqP8.54ozViEBYYmRkcKp7d2Ztt4hzjQ9Vb5zCeijN_RQrcI", - 
"eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiOElUemg3VVFaaUthTWtfME9qX1hFaHZENXpUWjE2Ti13WVdjeTJYUC1tdyIsInkiOiJPNUJiVEk0bUFpU005ZmpCejBRU3pXaU5vbnl3cWlQLUN0RGgwdnNGYXNRIiwiY3J2IjoiUC0yNTYifX0.D3DP3wqPvJv4TYYfhnfrOG6nsM-MMH_CqGfnOGjgdXHNF7xRwEJBOA.WL9Kz3gNYA7S5Rs5mKcXmA.EmQkXhO_nFqAwxJWaM0DH4s3pmCscZovB8YWJ3Ru4N8.Bf88uzwfxiyTjpejU5B0Ng", - "eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiMjlJMk4zRkF0UlBlNGhzYjRLWlhTbmVyV0wyTVhtSUN1LXJJaXhNSHpJQSIsInkiOiJvMjY1bzFReEdmbDhzMHQ0U1JROS00RGNpc3otbXh4NlJ6WVF4SktyeWpJIiwiY3J2IjoiUC0yNTYifX0.DRmsmXz6fCnLc_njDIKdpM7Oc4jTqd_yd9J94TOUksAstEUkAl9Ie3Wg-Ji_LzbdX2xRLXIimcw.FwJOHPQhnqKJCfxt1_qRnQ.ssx3q1ZYILsMTln5q-K8HVn93BVPI5ViusstKMxZzRs.zzcfzWNYSdNDdQ4CiHfymj0bePaAbVaT", - "eyJhbGciOiJFQ0RILUVTK0EyNTZLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiRUp6bTViQnRzVXJNYTl2Y1Q2d1hZRXI3ZjNMcjB0N1V4SDZuZzdGcFF0VSIsInkiOiJRYTNDSDllVTFXYjItdFdVSDN3Sk9fTDVMZXRsRUlMQWNkNE9XR2tFd0hZIiwiY3J2IjoiUC0yNTYifX0.5WxwluZpVWAOJdVrsnDIlEc4_wfRE1gXOaQyx_rKkElNz157Ykf-JsAD7aEvXfx--NKF4js5zYyjeCtxWBhRWPOoNNZJlqV_.Iuo82-qsP2S1SgQQklAnrw.H4wB6XoLKOKWCu6Y3LPAEuHkvyvr-xAh4IBm53uRF8g._fOLKq0bqDZ8KNjni_MJ4olHNaYz376dV9eNmp9O9PU", - "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiZktNSG5sRkoxajBTSnJ3WGtVWlpaX3BtWHdUQlJtcHhlaTkxdUpaczUycyIsInkiOiJLRkxKaXhEUTJQcjEybWp1aFdYb3pna2U1V3lhWnhmTWlxZkJ0OEJpbkRvIiwiY3J2IjoiUC0yNTYifX0.2LSD2Mw4tyYJyfsmpVmzBtJRd12jMEYGdlhFbaXIbKi5A33CGNQ1tg.s40aAjmZOvK8Us86FCBdHg.jpYSMAKp___oMCoWM495mTfbi_YC80ObeoCmGE3H_gs.A6V-jJJRY1yz24CaXGUbzg", - "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiSDRxcFUzeWtuRktWRnV4SmxLa3NZSE5ieHF3aXM0WWtCVVFHVE1Td05JQSIsInkiOiJHb0lpRUZaUGRRSHJCbVR4ZTA3akJoZmxrdWNqUjVoX1QwNWVXc3Zib0prIiwiY3J2IjoiUC0yNTYifX0.KTrwwV2uzD--gf3PGG-kjEAGgi7u0eMqZPZfa4kpyFGm3x8t2m1NHdz3t9rfiqjuaqsxPKhF4gs.cu16fEOzYaSxhHu_Ht9w4g.BRJdxVBI9spVtY5KQ6gTR4CNcKvmLUMKZap0AO-RF2I.DZyUaa2p6YCIaYtjWOjC9GN_VIYgySlZ", - "eyJhbGciOiJFQ0RILUVTK0ExOTJLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoieDBYSGRkSGM2Q0ktSnlfbUVMOEZZRExhWnV0UkVFczR4c3BMQmcwZk1jbyIsInkiOiJEa0xzOUJGTlBkTTVTNkpLYVJ3cnV1TWMwcUFzWW9yNW9fZWp6NXBNVXFrIiwiY3J2IjoiUC0yNTYifX0.mfCxJ7JYIqTMqcAh5Vp2USF0eF7OhOeluqda7YagOUJNwxA9wC9o23DSoLUylfrZUfanZrJJJcG69awlv-LY7anOLHlp3Ht5.ec48A_JWb4qa_PVHWZaTfQ.kDAjIDb3LzJpfxNh-DiAmAuaKMYaOGSTb0rkiJLuVeY.oxGCpPlii4pr89XMk4b9s084LucTqPGU6TLbOW2MZoc", - "eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkExMjhDQkMtSFMyNTYiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiQXB5TnlqU2d0bmRUcFg0eENYenNDRnZva1l3X18weXg2dGRUYzdPUUhIMCIsInkiOiJYUHdHMDVDaW1vOGlhWmxZbDNsMEp3ZllhY1FZWHFuM2RRZEJUWFpldDZBIiwiY3J2IjoiUC0yNTYifX0.yTA2PwK9IPqkaGPenZ9R-gOn9m9rvcSEfuX_Nm8AkuwHIYLzzYeAEA.ZW1F1iyHYKfo-YoanNaIVg.PouKQD94DlPA5lbpfGJXY-EJhidC7l4vSayVN2vVzvA.MexquqtGaXKUvX7WBmD4bA", - "eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkExOTJDQkMtSFMzODQiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiaDRWeGNzNVUzWk1fTlp4WmJxQ3hMTVB5UmEtR2ktSVNZa0xDTzE1RHJkZyIsInkiOiJFeVotS3dWNVE5OXlnWk5zU0lpSldpR3hqbXNLUk1WVE5sTTNSd1VYTFRvIiwiY3J2IjoiUC0yNTYifX0.wo56VISyL1QAbi2HLuVut5NGF2FvxKt7B8zHzJ3FpmavPozfbVZV08-GSYQ6jLQWJ4xsO80I4Kg.3_9Bo5ozvD96WHGhqp_tfQ.48UkJ6jk6WK70QItb2QZr0edKH7O-aMuVahTEeqyfW4.ulMlY2tbC341ct20YSmNdtc84FRz1I4g", - 
"eyJhbGciOiJFQ0RILUVTK0ExMjhLVyIsImVuYyI6IkEyNTZDQkMtSFM1MTIiLCJlcGsiOnsia3R5IjoiRUMiLCJ4IjoiN0xZRzZZWTJkel9ZaGNvNnRCcG1IX0tPREQ2X2hwX05tajdEc1c2RXgxcyIsInkiOiI5Y2lPeDcwUkdGT0tpVnBRX0NHQXB5NVlyeThDazBmUkpwNHVrQ2tjNmQ0IiwiY3J2IjoiUC0yNTYifX0.bWwW3J80k46HG1fQAZxUroko2OO8OKkeRavr_o3AnhJDMvp78OR229x-fZUaBm4uWv27_Yjm0X9T2H2lhlIli2Rl9v1PNC77.1NmsJBDGI1fDjRzyc4mtyA.9KfCFynQj7LmJq08qxAG4c-6ZPz1Lh3h3nUbgVwB0TI.cqech0d8XHzWfkWqgKZq1SlAfmO0PUwOsNVkuByVGWk", - } - - for _, msg := range ecSampleMessages { - obj, err := ParseEncrypted(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - plaintext, err := obj.Decrypt(ecTestKey) - if err != nil { - t.Error("unable to decrypt message", msg, err) - continue - } - if string(plaintext) != "Lorem ipsum dolor sit amet." { - t.Error("plaintext is not what we expected for msg", msg) - } - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk_test.go deleted file mode 100644 index d09eb632..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jwk_test.go +++ /dev/null @@ -1,525 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/hex" - "fmt" - "math/big" - "reflect" - "testing" -) - -func TestCurveSize(t *testing.T) { - size256 := curveSize(elliptic.P256()) - size384 := curveSize(elliptic.P384()) - size521 := curveSize(elliptic.P521()) - if size256 != 32 { - t.Error("P-256 have 32 bytes") - } - if size384 != 48 { - t.Error("P-384 have 48 bytes") - } - if size521 != 66 { - t.Error("P-521 have 66 bytes") - } -} - -func TestRoundtripRsaPrivate(t *testing.T) { - jwk, err := fromRsaPrivateKey(rsaTestKey) - if err != nil { - t.Error("problem constructing JWK from rsa key", err) - } - - rsa2, err := jwk.rsaPrivateKey() - if err != nil { - t.Error("problem converting RSA private -> JWK", err) - } - - if rsa2.N.Cmp(rsaTestKey.N) != 0 { - t.Error("RSA private N mismatch") - } - if rsa2.E != rsaTestKey.E { - t.Error("RSA private E mismatch") - } - if rsa2.D.Cmp(rsaTestKey.D) != 0 { - t.Error("RSA private D mismatch") - } - if len(rsa2.Primes) != 2 { - t.Error("RSA private roundtrip expected two primes") - } - if rsa2.Primes[0].Cmp(rsaTestKey.Primes[0]) != 0 { - t.Error("RSA private P mismatch") - } - if rsa2.Primes[1].Cmp(rsaTestKey.Primes[1]) != 0 { - t.Error("RSA private Q mismatch") - } -} - -func TestRsaPrivateInsufficientPrimes(t *testing.T) { - brokenRsaPrivateKey := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: rsaTestKey.N, - E: rsaTestKey.E, - }, - D: rsaTestKey.D, - Primes: []*big.Int{rsaTestKey.Primes[0]}, - } - - _, err := fromRsaPrivateKey(&brokenRsaPrivateKey) - if err != ErrUnsupportedKeyType { - t.Error("expected unsupported key type error, got", err) - } -} - -func TestRsaPrivateExcessPrimes(t *testing.T) { - brokenRsaPrivateKey := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: rsaTestKey.N, - E: rsaTestKey.E, - }, - D: rsaTestKey.D, - Primes: []*big.Int{ - rsaTestKey.Primes[0], - rsaTestKey.Primes[1], - big.NewInt(3), - }, - } - - _, err := fromRsaPrivateKey(&brokenRsaPrivateKey) - if err != ErrUnsupportedKeyType { - t.Error("expected unsupported key type error, got", err) - } -} - -func TestRoundtripEcPublic(t *testing.T) { - for i, ecTestKey := range []*ecdsa.PrivateKey{ecTestKey256, ecTestKey384, ecTestKey521} { - jwk, err := fromEcPublicKey(&ecTestKey.PublicKey) - - ec2, err := jwk.ecPublicKey() - if err != nil { - t.Error("problem converting ECDSA private -> JWK", i, err) - } - - if !reflect.DeepEqual(ec2.Curve, ecTestKey.Curve) { - t.Error("ECDSA private curve mismatch", i) - } - if ec2.X.Cmp(ecTestKey.X) != 0 { - t.Error("ECDSA X mismatch", i) - } - if ec2.Y.Cmp(ecTestKey.Y) != 0 { - t.Error("ECDSA Y mismatch", i) - } - } -} - -func TestRoundtripEcPrivate(t *testing.T) { - for i, ecTestKey := range []*ecdsa.PrivateKey{ecTestKey256, ecTestKey384, ecTestKey521} { - jwk, err := fromEcPrivateKey(ecTestKey) - - ec2, err := jwk.ecPrivateKey() - if err != nil { - t.Error("problem converting ECDSA private -> JWK", i, err) - } - - if !reflect.DeepEqual(ec2.Curve, ecTestKey.Curve) { - t.Error("ECDSA private curve mismatch", i) - } - if ec2.X.Cmp(ecTestKey.X) != 0 { - t.Error("ECDSA X mismatch", i) - } - if ec2.Y.Cmp(ecTestKey.Y) != 0 { - t.Error("ECDSA Y mismatch", i) - } - if ec2.D.Cmp(ecTestKey.D) != 0 { - t.Error("ECDSA D mismatch", i) - } - } -} - -func TestMarshalUnmarshal(t *testing.T) { - kid := "DEADBEEF" - - for i, key := range []interface{}{ecTestKey256, ecTestKey384, ecTestKey521, rsaTestKey} { - for _, use := range []string{"", "sig", "enc"} { - jwk := JsonWebKey{Key: key, 
KeyID: kid, Algorithm: "foo"} - if use != "" { - jwk.Use = use - } - - jsonbar, err := jwk.MarshalJSON() - if err != nil { - t.Error("problem marshaling", i, err) - } - - var jwk2 JsonWebKey - err = jwk2.UnmarshalJSON(jsonbar) - if err != nil { - t.Error("problem unmarshalling", i, err) - } - - jsonbar2, err := jwk2.MarshalJSON() - if err != nil { - t.Error("problem marshaling", i, err) - } - - if !bytes.Equal(jsonbar, jsonbar2) { - t.Error("roundtrip should not lose information", i) - } - if jwk2.KeyID != kid { - t.Error("kid did not roundtrip JSON marshalling", i) - } - - if jwk2.Algorithm != "foo" { - t.Error("alg did not roundtrip JSON marshalling", i) - } - - if jwk2.Use != use { - t.Error("use did not roundtrip JSON marshalling", i) - } - } - } -} - -func TestMarshalNonPointer(t *testing.T) { - type EmbedsKey struct { - Key JsonWebKey - } - - keyJson := []byte(`{ - "e": "AQAB", - "kty": "RSA", - "n": "vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw" - }`) - var parsedKey JsonWebKey - err := UnmarshalJSON(keyJson, &parsedKey) - if err != nil { - t.Error(fmt.Sprintf("Error unmarshalling key: %v", err)) - return - } - ek := EmbedsKey{ - Key: parsedKey, - } - out, err := MarshalJSON(ek) - if err != nil { - t.Error(fmt.Sprintf("Error marshalling JSON: %v", err)) - return - } - expected := "{\"Key\":{\"kty\":\"RSA\",\"n\":\"vd7rZIoTLEe-z1_8G1FcXSw9CQFEJgV4g9V277sER7yx5Qjz_Pkf2YVth6wwwFJEmzc0hoKY-MMYFNwBE4hQHw\",\"e\":\"AQAB\"}}" - if string(out) != expected { - t.Error("Failed to marshal embedded non-pointer JWK properly:", string(out)) - } -} - -func TestMarshalUnmarshalInvalid(t *testing.T) { - // Make an invalid curve coordinate by creating a byte array that is one - // byte too large, and setting the first byte to 1 (otherwise it's just zero). 
- invalidCoord := make([]byte, curveSize(ecTestKey256.Curve)+1) - invalidCoord[0] = 1 - - keys := []interface{}{ - // Empty keys - &rsa.PrivateKey{}, - &ecdsa.PrivateKey{}, - // Invalid keys - &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - // Missing values in pub key - Curve: elliptic.P256(), - }, - }, - &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - // Invalid curve - Curve: nil, - X: ecTestKey256.X, - Y: ecTestKey256.Y, - }, - }, - &ecdsa.PrivateKey{ - // Valid pub key, but missing priv key values - PublicKey: ecTestKey256.PublicKey, - }, - &ecdsa.PrivateKey{ - // Invalid pub key, values too large - PublicKey: ecdsa.PublicKey{ - Curve: ecTestKey256.Curve, - X: big.NewInt(0).SetBytes(invalidCoord), - Y: big.NewInt(0).SetBytes(invalidCoord), - }, - D: ecTestKey256.D, - }, - nil, - } - - for i, key := range keys { - jwk := JsonWebKey{Key: key} - _, err := jwk.MarshalJSON() - if err == nil { - t.Error("managed to serialize invalid key", i) - } - } -} - -func TestWebKeyVectorsInvalid(t *testing.T) { - keys := []string{ - // Invalid JSON - "{X", - // Empty key - "{}", - // Invalid RSA keys - `{"kty":"RSA"}`, - `{"kty":"RSA","e":""}`, - `{"kty":"RSA","e":"XXXX"}`, - `{"kty":"RSA","d":"XXXX"}`, - // Invalid EC keys - `{"kty":"EC","crv":"ABC"}`, - `{"kty":"EC","crv":"P-256"}`, - `{"kty":"EC","crv":"P-256","d":"XXX"}`, - `{"kty":"EC","crv":"ABC","d":"dGVzdA","x":"dGVzdA"}`, - `{"kty":"EC","crv":"P-256","d":"dGVzdA","x":"dGVzdA"}`, - } - - for _, key := range keys { - var jwk2 JsonWebKey - err := jwk2.UnmarshalJSON([]byte(key)) - if err == nil { - t.Error("managed to parse invalid key:", key) - } - } -} - -// Test vectors from RFC 7520 -var cookbookJWKs = []string{ - // EC Public - stripWhitespace(`{ - "kty": "EC", - "kid": "bilbo.baggins@hobbiton.example", - "use": "sig", - "crv": "P-521", - "x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9 - A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt", - "y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVy - SsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1" - }`), - - // EC Private - stripWhitespace(`{ - "kty": "EC", - "kid": "bilbo.baggins@hobbiton.example", - "use": "sig", - "crv": "P-521", - "x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9 - A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt", - "y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVy - SsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1", - "d": "AAhRON2r9cqXX1hg-RoI6R1tX5p2rUAYdmpHZoC1XNM56KtscrX6zb - KipQrCW9CGZH3T4ubpnoTKLDYJ_fF3_rJt" - }`), - - // RSA Public - stripWhitespace(`{ - "kty": "RSA", - "kid": "bilbo.baggins@hobbiton.example", - "use": "sig", - "n": "n4EPtAOCc9AlkeQHPzHStgAbgs7bTZLwUBZdR8_KuKPEHLd4rHVTeT - -O-XV2jRojdNhxJWTDvNd7nqQ0VEiZQHz_AJmSCpMaJMRBSFKrKb2wqV - wGU_NsYOYL-QtiWN2lbzcEe6XC0dApr5ydQLrHqkHHig3RBordaZ6Aj- - oBHqFEHYpPe7Tpe-OfVfHd1E6cS6M1FZcD1NNLYD5lFHpPI9bTwJlsde - 3uhGqC0ZCuEHg8lhzwOHrtIQbS0FVbb9k3-tVTU4fg_3L_vniUFAKwuC - LqKnS2BYwdq_mzSnbLY7h_qixoR7jig3__kRhuaxwUkRz5iaiQkqgc5g - HdrNP5zw", - "e": "AQAB" - }`), - - // RSA Private - stripWhitespace(`{"kty":"RSA", - "kid":"juliet@capulet.lit", - "use":"enc", - "n":"t6Q8PWSi1dkJj9hTP8hNYFlvadM7DflW9mWepOJhJ66w7nyoK1gPNqFMSQRy - O125Gp-TEkodhWr0iujjHVx7BcV0llS4w5ACGgPrcAd6ZcSR0-Iqom-QFcNP - 8Sjg086MwoqQU_LYywlAGZ21WSdS_PERyGFiNnj3QQlO8Yns5jCtLCRwLHL0 - Pb1fEv45AuRIuUfVcPySBWYnDyGxvjYGDSM-AqWS9zIQ2ZilgT-GqUmipg0X - OC0Cc20rgLe2ymLHjpHciCKVAbY5-L32-lSeZO-Os6U15_aXrk9Gw8cPUaX1 - _I8sLGuSiVdt3C_Fn2PZ3Z8i744FPFGGcG1qs2Wz-Q", - "e":"AQAB", - "d":"GRtbIQmhOZtyszfgKdg4u_N-R_mZGU_9k7JQ_jn1DnfTuMdSNprTeaSTyWfS - 
NkuaAwnOEbIQVy1IQbWVV25NY3ybc_IhUJtfri7bAXYEReWaCl3hdlPKXy9U - vqPYGR0kIXTQRqns-dVJ7jahlI7LyckrpTmrM8dWBo4_PMaenNnPiQgO0xnu - ToxutRZJfJvG4Ox4ka3GORQd9CsCZ2vsUDmsXOfUENOyMqADC6p1M3h33tsu - rY15k9qMSpG9OX_IJAXmxzAh_tWiZOwk2K4yxH9tS3Lq1yX8C1EWmeRDkK2a - hecG85-oLKQt5VEpWHKmjOi_gJSdSgqcN96X52esAQ", - "p":"2rnSOV4hKSN8sS4CgcQHFbs08XboFDqKum3sc4h3GRxrTmQdl1ZK9uw-PIHf - QP0FkxXVrx-WE-ZEbrqivH_2iCLUS7wAl6XvARt1KkIaUxPPSYB9yk31s0Q8 - UK96E3_OrADAYtAJs-M3JxCLfNgqh56HDnETTQhH3rCT5T3yJws", - "q":"1u_RiFDP7LBYh3N4GXLT9OpSKYP0uQZyiaZwBtOCBNJgQxaj10RWjsZu0c6I - edis4S7B_coSKB0Kj9PaPaBzg-IySRvvcQuPamQu66riMhjVtG6TlV8CLCYK - rYl52ziqK0E_ym2QnkwsUX7eYTB7LbAHRK9GqocDE5B0f808I4s", - "dp":"KkMTWqBUefVwZ2_Dbj1pPQqyHSHjj90L5x_MOzqYAJMcLMZtbUtwKqvVDq3 - tbEo3ZIcohbDtt6SbfmWzggabpQxNxuBpoOOf_a_HgMXK_lhqigI4y_kqS1w - Y52IwjUn5rgRrJ-yYo1h41KR-vz2pYhEAeYrhttWtxVqLCRViD6c", - "dq":"AvfS0-gRxvn0bwJoMSnFxYcK1WnuEjQFluMGfwGitQBWtfZ1Er7t1xDkbN9 - GQTB9yqpDoYaN06H7CFtrkxhJIBQaj6nkF5KKS3TQtQ5qCzkOkmxIe3KRbBy - mXxkb5qwUpX5ELD5xFc6FeiafWYY63TmmEAu_lRFCOJ3xDea-ots", - "qi":"lSQi-w9CpyUReMErP1RsBLk7wNtOvs5EQpPqmuMvqW57NBUczScEoPwmUqq - abu9V0-Py4dQ57_bapoKRu1R90bvuFnU63SHWEFglZQvJDMeAvmj4sm-Fp0o - Yu_neotgQ0hzbI5gry7ajdYy9-2lNx_76aBZoOUu9HCJ-UsfSOI8"}`), -} - -// SHA-256 thumbprints of the above keys, hex-encoded -var cookbookJWKThumbprints = []string{ - "747ae2dd2003664aeeb21e4753fe7402846170a16bc8df8f23a8cf06d3cbe793", - "747ae2dd2003664aeeb21e4753fe7402846170a16bc8df8f23a8cf06d3cbe793", - "f63838e96077ad1fc01c3f8405774dedc0641f558ebb4b40dccf5f9b6d66a932", - "0fc478f8579325fcee0d4cbc6d9d1ce21730a6e97e435d6008fb379b0ebe47d4", -} - -func TestWebKeyVectorsValid(t *testing.T) { - for _, key := range cookbookJWKs { - var jwk2 JsonWebKey - err := jwk2.UnmarshalJSON([]byte(key)) - if err != nil { - t.Error("unable to parse valid key:", key, err) - } - } -} - -func TestThumbprint(t *testing.T) { - for i, key := range cookbookJWKs { - var jwk2 JsonWebKey - err := jwk2.UnmarshalJSON([]byte(key)) - if err != nil { - t.Error("unable to parse valid key:", key, err) - } - - tp, err := jwk2.Thumbprint(crypto.SHA256) - if err != nil { - t.Error("unable to compute thumbprint:", key, err) - } - - tpHex := hex.EncodeToString(tp) - if cookbookJWKThumbprints[i] != tpHex { - t.Error("incorrect thumbprint:", i, cookbookJWKThumbprints[i], tpHex) - } - } -} - -func TestMarshalUnmarshalJWKSet(t *testing.T) { - jwk1 := JsonWebKey{Key: rsaTestKey, KeyID: "ABCDEFG", Algorithm: "foo"} - jwk2 := JsonWebKey{Key: rsaTestKey, KeyID: "GFEDCBA", Algorithm: "foo"} - var set JsonWebKeySet - set.Keys = append(set.Keys, jwk1) - set.Keys = append(set.Keys, jwk2) - - jsonbar, err := MarshalJSON(&set) - if err != nil { - t.Error("problem marshalling set", err) - } - var set2 JsonWebKeySet - err = UnmarshalJSON(jsonbar, &set2) - if err != nil { - t.Error("problem unmarshalling set", err) - } - jsonbar2, err := MarshalJSON(&set2) - if err != nil { - t.Error("problem marshalling set", err) - } - if !bytes.Equal(jsonbar, jsonbar2) { - t.Error("roundtrip should not lose information") - } -} - -func TestJWKSetKey(t *testing.T) { - jwk1 := JsonWebKey{Key: rsaTestKey, KeyID: "ABCDEFG", Algorithm: "foo"} - jwk2 := JsonWebKey{Key: rsaTestKey, KeyID: "GFEDCBA", Algorithm: "foo"} - var set JsonWebKeySet - set.Keys = append(set.Keys, jwk1) - set.Keys = append(set.Keys, jwk2) - k := set.Key("ABCDEFG") - if len(k) != 1 { - t.Errorf("method should return slice with one key not %d", len(k)) - } - if k[0].KeyID != "ABCDEFG" { - t.Error("method should return key with ID ABCDEFG") - } 
-} - -func TestJWKSymmetricKey(t *testing.T) { - sample1 := `{"kty":"oct","alg":"A128KW","k":"GawgguFyGrWKav7AX4VKUg"}` - sample2 := `{"kty":"oct","k":"AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow","kid":"HMAC key used in JWS spec Appendix A.1 example"}` - - var jwk1 JsonWebKey - UnmarshalJSON([]byte(sample1), &jwk1) - - if jwk1.Algorithm != "A128KW" { - t.Errorf("expected Algorithm to be A128KW, but was '%s'", jwk1.Algorithm) - } - expected1 := fromHexBytes("19ac2082e1721ab58a6afec05f854a52") - if !bytes.Equal(jwk1.Key.([]byte), expected1) { - t.Errorf("expected Key to be '%s', but was '%s'", hex.EncodeToString(expected1), hex.EncodeToString(jwk1.Key.([]byte))) - } - - var jwk2 JsonWebKey - UnmarshalJSON([]byte(sample2), &jwk2) - - if jwk2.KeyID != "HMAC key used in JWS spec Appendix A.1 example" { - t.Errorf("expected KeyID to be 'HMAC key used in JWS spec Appendix A.1 example', but was '%s'", jwk2.KeyID) - } - expected2 := fromHexBytes(` - 0323354b2b0fa5bc837e0665777ba68f5ab328e6f054c928a90f84b2d2502ebf - d3fb5a92d20647ef968ab4c377623d223d2e2172052e4f08c0cd9af567d080a3`) - if !bytes.Equal(jwk2.Key.([]byte), expected2) { - t.Errorf("expected Key to be '%s', but was '%s'", hex.EncodeToString(expected2), hex.EncodeToString(jwk2.Key.([]byte))) - } -} - -func TestJWKSymmetricRoundtrip(t *testing.T) { - jwk1 := JsonWebKey{Key: []byte{1, 2, 3, 4}} - marshaled, err := jwk1.MarshalJSON() - if err != nil { - t.Errorf("failed to marshal valid JWK object", err) - } - - var jwk2 JsonWebKey - err = jwk2.UnmarshalJSON(marshaled) - if err != nil { - t.Errorf("failed to unmarshal valid JWK object", err) - } - - if !bytes.Equal(jwk1.Key.([]byte), jwk2.Key.([]byte)) { - t.Error("round-trip of symmetric JWK gave different raw keys") - } -} - -func TestJWKSymmetricInvalid(t *testing.T) { - invalid := JsonWebKey{} - _, err := invalid.MarshalJSON() - if err == nil { - t.Error("excepted error on marshaling invalid symmetric JWK object") - } - - var jwk JsonWebKey - err = jwk.UnmarshalJSON([]byte(`{"kty":"oct"}`)) - if err == nil { - t.Error("excepted error on unmarshaling invalid symmetric JWK object") - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws_test.go deleted file mode 100644 index d8f94c14..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/jws_test.go +++ /dev/null @@ -1,302 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "fmt" - "strings" - "testing" -) - -func TestCompactParseJWS(t *testing.T) { - // Should parse - msg := "eyJhbGciOiJYWVoifQ.cGF5bG9hZA.c2lnbmF0dXJl" - _, err := ParseSigned(msg) - if err != nil { - t.Error("Unable to parse valid message:", err) - } - - // Messages that should fail to parse - failures := []string{ - // Not enough parts - "eyJhbGciOiJYWVoifQ.cGF5bG9hZA", - // Invalid signature - "eyJhbGciOiJYWVoifQ.cGF5bG9hZA.////", - // Invalid payload - "eyJhbGciOiJYWVoifQ.////.c2lnbmF0dXJl", - // Invalid header - "////.eyJhbGciOiJYWVoifQ.c2lnbmF0dXJl", - // Invalid header - "cGF5bG9hZA.cGF5bG9hZA.c2lnbmF0dXJl", - } - - for i := range failures { - _, err = ParseSigned(failures[i]) - if err == nil { - t.Error("Able to parse invalid message") - } - } -} - -func TestFullParseJWS(t *testing.T) { - // Messages that should succeed to parse - successes := []string{ - "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"e30\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"},{\"protected\":\"e30\",\"signature\":\"CUJD\"}]}", - } - - for i := range successes { - _, err := ParseSigned(successes[i]) - if err != nil { - t.Error("Unble to parse valid message", err, successes[i]) - } - } - - // Messages that should fail to parse - failures := []string{ - // Empty - "{}", - // Invalid JSON - "{XX", - // Invalid protected header - "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}", - // Invalid protected header - "{\"payload\":\"CUJD\",\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}", - // Invalid protected header - "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"###\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}", - // Invalid payload - "{\"payload\":\"###\",\"signatures\":[{\"protected\":\"CUJD\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"CUJD\"}]}", - // Invalid payload - "{\"payload\":\"CUJD\",\"signatures\":[{\"protected\":\"e30\",\"header\":{\"kid\":\"XYZ\"},\"signature\":\"###\"}]}", - } - - for i := range failures { - _, err := ParseSigned(failures[i]) - if err == nil { - t.Error("Able to parse invalid message", err, failures[i]) - } - } -} - -func TestRejectUnprotectedJWSNonce(t *testing.T) { - // No need to test compact, since that's always protected - - // Flattened JSON - input := `{ - "header": { "nonce": "should-cause-an-error" }, - "payload": "does-not-matter", - "signature": "does-not-matter" - }` - _, err := ParseSigned(input) - if err == nil { - t.Error("JWS with an unprotected nonce parsed as valid.") - } else if err != ErrUnprotectedNonce { - t.Errorf("Improper error for unprotected nonce: %v", err) - } - - // Full JSON - input = `{ - "payload": "does-not-matter", - "signatures": [{ - "header": { "nonce": "should-cause-an-error" }, - "signature": "does-not-matter" - }] - }` - _, err = ParseSigned(input) - if err == nil { - t.Error("JWS with an unprotected nonce parsed as valid.") - } else if err != ErrUnprotectedNonce { - t.Errorf("Improper error for unprotected nonce: %v", err) - } -} - -func TestVerifyFlattenedWithIncludedUnprotectedKey(t *testing.T) { - input := `{ - "header": { - "alg": "RS256", - "jwk": { - "e": "AQAB", - "kty": "RSA", - "n": 
"tSwgy3ORGvc7YJI9B2qqkelZRUC6F1S5NwXFvM4w5-M0TsxbFsH5UH6adigV0jzsDJ5imAechcSoOhAh9POceCbPN1sTNwLpNbOLiQQ7RD5mY_pSUHWXNmS9R4NZ3t2fQAzPeW7jOfF0LKuJRGkekx6tXP1uSnNibgpJULNc4208dgBaCHo3mvaE2HV2GmVl1yxwWX5QZZkGQGjNDZYnjFfa2DKVvFs0QbAk21ROm594kAxlRlMMrvqlf24Eq4ERO0ptzpZgm_3j_e4hGRD39gJS7kAzK-j2cacFQ5Qi2Y6wZI2p-FCq_wiYsfEAIkATPBiLKl_6d_Jfcvs_impcXQ" - } - }, - "payload": "Zm9vCg", - "signature": "hRt2eYqBd_MyMRNIh8PEIACoFtmBi7BHTLBaAhpSU6zyDAFdEBaX7us4VB9Vo1afOL03Q8iuoRA0AT4akdV_mQTAQ_jhTcVOAeXPr0tB8b8Q11UPQ0tXJYmU4spAW2SapJIvO50ntUaqU05kZd0qw8-noH1Lja-aNnU-tQII4iYVvlTiRJ5g8_CADsvJqOk6FcHuo2mG643TRnhkAxUtazvHyIHeXMxydMMSrpwUwzMtln4ZJYBNx4QGEq6OhpAD_VSp-w8Lq5HOwGQoNs0bPxH1SGrArt67LFQBfjlVr94E1sn26p4vigXm83nJdNhWAMHHE9iV67xN-r29LT-FjA" - }` - - jws, err := ParseSigned(input) - if err != nil { - t.Error("Unable to parse valid message.") - } - if len(jws.Signatures) != 1 { - t.Error("Too many or too few signatures.") - } - sig := jws.Signatures[0] - if sig.Header.JsonWebKey == nil { - t.Error("No JWK in signature header.") - } - payload, err := jws.Verify(sig.Header.JsonWebKey) - if err != nil { - t.Error(fmt.Sprintf("Signature did not validate: %v", err)) - } - if string(payload) != "foo\n" { - t.Error(fmt.Sprintf("Payload was incorrect: '%s' should have been 'foo\\n'", string(payload))) - } -} - -func TestVerifyFlattenedWithPrivateProtected(t *testing.T) { - // The protected field contains a Private Header Parameter name, per - // https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4 - // Base64-decoded, it's '{"nonce":"8HIepUNFZUa-exKTrXVf4g"}' - input := `{"header":{"alg":"RS256","jwk":{"kty":"RSA","n":"7ixeydcbxxppzxrBphrW1atUiEZqTpiHDpI-79olav5XxAgWolHmVsJyxzoZXRxmtED8PF9-EICZWBGdSAL9ZTD0hLUCIsPcpdgT_LqNW3Sh2b2caPL2hbMF7vsXvnCGg9varpnHWuYTyRrCLUF9vM7ES-V3VCYTa7LcCSRm56Gg9r19qar43Z9kIKBBxpgt723v2cC4bmLmoAX2s217ou3uCpCXGLOeV_BesG4--Nl3pso1VhCfO85wEWjmW6lbv7Kg4d7Jdkv5DjDZfJ086fkEAYZVYGRpIgAvJBH3d3yKDCrSByUEud1bWuFjQBmMaeYOrVDXO_mbYg5PwUDMhw","e":"AQAB"}},"protected":"eyJub25jZSI6IjhISWVwVU5GWlVhLWV4S1RyWFZmNGcifQ","payload":"eyJjb250YWN0IjpbIm1haWx0bzpmb29AYmFyLmNvbSJdfQ","signature":"AyvVGMgXsQ1zTdXrZxE_gyO63pQgotL1KbI7gv6Wi8I7NRy0iAOkDAkWcTQT9pcCYApJ04lXfEDZfP5i0XgcFUm_6spxi5mFBZU-NemKcvK9dUiAbXvb4hB3GnaZtZiuVnMQUb_ku4DOaFFKbteA6gOYCnED_x7v0kAPHIYrQnvIa-KZ6pTajbV9348zgh9TL7NgGIIsTcMHd-Jatr4z1LQ0ubGa8tS300hoDhVzfoDQaEetYjCo1drR1RmdEN1SIzXdHOHfubjA3ZZRbrF_AJnNKpRRoIwzu1VayOhRmdy1qVSQZq_tENF4VrQFycEL7DhG7JLoXC4T2p1urwMlsw"}` - - jws, err := ParseSigned(input) - if err != nil { - t.Error("Unable to parse valid message.") - } - if len(jws.Signatures) != 1 { - t.Error("Too many or too few signatures.") - } - sig := jws.Signatures[0] - if sig.Header.JsonWebKey == nil { - t.Error("No JWK in signature header.") - } - payload, err := jws.Verify(sig.Header.JsonWebKey) - if err != nil { - t.Error(fmt.Sprintf("Signature did not validate: %v", err)) - } - expected := "{\"contact\":[\"mailto:foo@bar.com\"]}" - if string(payload) != expected { - t.Error(fmt.Sprintf("Payload was incorrect: '%s' should have been '%s'", string(payload), expected)) - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestSampleNimbusJWSMessagesRSA(t *testing.T) { - rsaPublicKey, err := LoadPublicKey(fromBase64Bytes(` - MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3aLSGwbeX0ZA2Ha+EvELaIFGzO - 91+Q15JQc/tdGdCgGW3XAbrh7ZUhDh1XKzbs+UOQxqn3Eq4YOx18IG0WsJSuCaHQIxnDlZ - t/GP8WLwjMC0izlJLm2SyfM/EEoNpmTC3w6MQ2dHK7SZ9Zoq+sKijQd+V7CYdr8zHMpDrd - 
NKoEcR0HjmvzzdMoUChhkGH5TaNbZyollULTggepaYUKS8QphqdSDMWiSetKG+g6V87lv6 - CVYyK1FF6g7Esp5OOj5pNn3/bmF+7V+b7TvK91NCIlURCjE9toRgNoIP4TDnWRn/vvfZ3G - zNrtWmlizqz3r5KdvIs71ahWgMUSD4wfazrwIDAQAB`)) - if err != nil { - panic(err) - } - - rsaSampleMessages := []string{ - "eyJhbGciOiJSUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.YHX849fvekz6wJGeyqnQhFqyHFcUXNJKj3o2w3ddR46YLlsCopUJrlifRU_ZuTWzpYxt5oC--T2eoqMhlCvltSWrE5_1_EumqiMfAYsZULx9E6Jns7q3w7mttonYFSIh7aR3-yg2HMMfTCgoAY1y_AZ4VjXwHDcZ5gu1oZDYgvZF4uXtCmwT6e5YtR1m8abiWPF8BgoTG_BD3KV6ClLj_QQiNFdfdxAMDw7vKVOKG1T7BFtz6cDs2Q3ILS4To5E2IjcVSSYS8mi77EitCrWmrqbK_G3WCdKeUFGnMnyuKXaCDy_7FLpAZ6Z5RomRr5iskXeJZdZqIKcJV8zl4fpsPA", - "eyJhbGciOiJSUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.meyfoOTjAAjXHFYiNlU7EEnsYtbeUYeEglK6BL_cxISEr2YAGLr1Gwnn2HnucTnH6YilyRio7ZC1ohy_ZojzmaljPHqpr8kn1iqNFu9nFE2M16ZPgJi38-PGzppcDNliyzOQO-c7L-eA-v8Gfww5uyRaOJdiWg-hUJmeGBIngPIeLtSVmhJtz8oTeqeNdUOqQv7f7VRCuvagLhW1PcEM91VUS-gS0WEUXoXWZ2lp91No0v1O24izgX3__FKiX_16XhrOfAgJ82F61vjbTIQYwhexHPZyYTlXYt_scNRzFGhSKeGFin4zVdFLOXWJqKWdUd5IrDP5Nya3FSoWbWDXAg", - "eyJhbGciOiJSUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.rQPz0PDh8KyE2AX6JorgI0MLwv-qi1tcWlz6tuZuWQG1hdrlzq5tR1tQg1evYNc_SDDX87DWTSKXT7JEqhKoFixLfZa13IJrOc7FB8r5ZLx7OwOBC4F--OWrvxMA9Y3MTJjPN3FemQePUo-na2vNUZv-YgkcbuOgbO3hTxwQ7j1JGuqy-YutXOFnccdXvntp3t8zYZ4Mg1It_IyL9pzgGqHIEmMV1pCFGHsDa-wStB4ffmdhrADdYZc0q_SvxUdobyC_XzZCz9ENzGIhgwYxyyrqg7kjqUGoKmCLmoSlUFW7goTk9IC5SXdUyLPuESxOWNfHoRClGav230GYjPFQFA", - "eyJhbGciOiJQUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.UTtxjsv_6x4CdlAmZfAW6Lun3byMjJbcwRp_OlPH2W4MZaZar7aql052mIB_ddK45O9VUz2aphYVRvKPZY8WHmvlTUU30bk0z_cDJRYB9eIJVMOiRCYj0oNkz1iEZqsP0YgngxwuUDv4Q4A6aJ0Bo5E_rZo3AnrVHMHUjPp_ZRRSBFs30tQma1qQ0ApK4Gxk0XYCYAcxIv99e78vldVRaGzjEZmQeAVZx4tGcqZP20vG1L84nlhSGnOuZ0FhR8UjRFLXuob6M7EqtMRoqPgRYw47EI3fYBdeSivAg98E5S8R7R1NJc7ef-l03RvfUSY0S3_zBq_4PlHK6A-2kHb__w", - "eyJhbGciOiJSUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.meyfoOTjAAjXHFYiNlU7EEnsYtbeUYeEglK6BL_cxISEr2YAGLr1Gwnn2HnucTnH6YilyRio7ZC1ohy_ZojzmaljPHqpr8kn1iqNFu9nFE2M16ZPgJi38-PGzppcDNliyzOQO-c7L-eA-v8Gfww5uyRaOJdiWg-hUJmeGBIngPIeLtSVmhJtz8oTeqeNdUOqQv7f7VRCuvagLhW1PcEM91VUS-gS0WEUXoXWZ2lp91No0v1O24izgX3__FKiX_16XhrOfAgJ82F61vjbTIQYwhexHPZyYTlXYt_scNRzFGhSKeGFin4zVdFLOXWJqKWdUd5IrDP5Nya3FSoWbWDXAg", - "eyJhbGciOiJSUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.rQPz0PDh8KyE2AX6JorgI0MLwv-qi1tcWlz6tuZuWQG1hdrlzq5tR1tQg1evYNc_SDDX87DWTSKXT7JEqhKoFixLfZa13IJrOc7FB8r5ZLx7OwOBC4F--OWrvxMA9Y3MTJjPN3FemQePUo-na2vNUZv-YgkcbuOgbO3hTxwQ7j1JGuqy-YutXOFnccdXvntp3t8zYZ4Mg1It_IyL9pzgGqHIEmMV1pCFGHsDa-wStB4ffmdhrADdYZc0q_SvxUdobyC_XzZCz9ENzGIhgwYxyyrqg7kjqUGoKmCLmoSlUFW7goTk9IC5SXdUyLPuESxOWNfHoRClGav230GYjPFQFA", - } - - for _, msg := range rsaSampleMessages { - obj, err := ParseSigned(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - payload, err := obj.Verify(rsaPublicKey) - if err != nil { - t.Error("unable to verify message", msg, err) - continue - } - if string(payload) != "Lorem ipsum dolor sit amet" { - t.Error("payload is not what we expected for msg", msg) - } - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestSampleNimbusJWSMessagesEC(t *testing.T) { - ecPublicKeyP256, err := LoadPublicKey(fromBase64Bytes("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIg62jq6FyL1otEj9Up7S35BUrwGF9TVrAzrrY1rHUKZqYIGEg67u/imjgadVcr7y9Q32I0gB8W8FHqbqt696rA==")) - if err != nil { - panic(err) - } - ecPublicKeyP384, err := 
LoadPublicKey(fromBase64Bytes("MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEPXsVlqCtN2oTY+F+hFZm3M0ldYpb7IeeJM5wYmT0k1RaqzBFDhDMNnYK5Q5x+OyssZrAtHgYDFw02AVJhhng/eHRp7mqmL/vI3wbxJtrLKYldIbBA+9fYBQcKeibjlu5")) - if err != nil { - panic(err) - } - ecPublicKeyP521, err := LoadPublicKey(fromBase64Bytes("MIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAa2w3MMJ5FWD6tSf68G+Wy5jIhWXOD3IA7pE5IC/myQzo1lWcD8KS57SM6nm4POtPcxyLmDhL7FLuh8DKoIZyvtAAdK8+tOQP7XXRlT2bkvzIuazp05It3TAPu00YzTIpKfDlc19Y1lvf7etrbFqhShD92B+hHmhT4ddrdbPCBDW8hvU=")) - if err != nil { - panic(err) - } - - ecPublicKeys := []interface{}{ecPublicKeyP256, ecPublicKeyP384, ecPublicKeyP521} - - ecSampleMessages := []string{ - "eyJhbGciOiJFUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.MEWJVlvGRQyzMEGOYm4rwuiwxrX-6LjnlbaRDAuhwmnBm2Gtn7pRpGXRTMFZUXsSGDz2L1p-Hz1qn8j9bFIBtQ", - "eyJhbGciOiJFUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.nbdjPnJPYQtVNNdBIx8-KbFKplTxrz-hnW5UNhYUY7SBkwHK4NZnqc2Lv4DXoA0aWHq9eiypgOh1kmyPWGEmqKAHUx0xdIEkBoHk3ZsbmhOQuq2jL_wcMUG6nTWNhLrB", - "eyJhbGciOiJFUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.AeYNFC1rwIgQv-5fwd8iRyYzvTaSCYTEICepgu9gRId-IW99kbSVY7yH0MvrQnqI-a0L8zwKWDR35fW5dukPAYRkADp3Y1lzqdShFcEFziUVGo46vqbiSajmKFrjBktJcCsfjKSaLHwxErF-T10YYPCQFHWb2nXJOOI3CZfACYqgO84g", - } - - for i, msg := range ecSampleMessages { - obj, err := ParseSigned(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - payload, err := obj.Verify(ecPublicKeys[i]) - if err != nil { - t.Error("unable to verify message", msg, err) - continue - } - if string(payload) != "Lorem ipsum dolor sit amet" { - t.Error("payload is not what we expected for msg", msg) - } - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestSampleNimbusJWSMessagesHMAC(t *testing.T) { - hmacTestKey := fromHexBytes("DF1FA4F36FFA7FC42C81D4B3C033928D") - - hmacSampleMessages := []string{ - "eyJhbGciOiJIUzI1NiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.W5tc_EUhxexcvLYEEOckyyvdb__M5DQIVpg6Nmk1XGM", - "eyJhbGciOiJIUzM4NCJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.sBu44lXOJa4Nd10oqOdYH2uz3lxlZ6o32QSGHaoGdPtYTDG5zvSja6N48CXKqdAh", - "eyJhbGciOiJIUzUxMiJ9.TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQ.M0yR4tmipsORIix-BitIbxEPGaxPchDfj8UNOpKuhDEfnb7URjGvCKn4nOlyQ1z9mG1FKbwnqR1hOVAWSzAU_w", - } - - for _, msg := range hmacSampleMessages { - obj, err := ParseSigned(msg) - if err != nil { - t.Error("unable to parse message", msg, err) - continue - } - payload, err := obj.Verify(hmacTestKey) - if err != nil { - t.Error("unable to verify message", msg, err) - continue - } - if string(payload) != "Lorem ipsum dolor sit amet" { - t.Error("payload is not what we expected for msg", msg) - } - } -} - -// Test vectors generated with nimbus-jose-jwt -func TestErrorMissingPayloadJWS(t *testing.T) { - _, err := (&rawJsonWebSignature{}).sanitized() - if err == nil { - t.Error("was able to parse message with missing payload") - } - if !strings.Contains(err.Error(), "missing payload") { - t.Errorf("unexpected error message, should contain 'missing payload': %s", err) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing_test.go deleted file mode 100644 index 981770d5..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/signing_test.go +++ /dev/null @@ -1,447 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "fmt" - "io" - "testing" -) - -type staticNonceSource string - -func (sns staticNonceSource) Nonce() (string, error) { - return string(sns), nil -} - -func RoundtripJWS(sigAlg SignatureAlgorithm, serializer func(*JsonWebSignature) (string, error), corrupter func(*JsonWebSignature), signingKey interface{}, verificationKey interface{}, nonce string) error { - signer, err := NewSigner(sigAlg, signingKey) - if err != nil { - return fmt.Errorf("error on new signer: %s", err) - } - - if nonce != "" { - signer.SetNonceSource(staticNonceSource(nonce)) - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := signer.Sign(input) - if err != nil { - return fmt.Errorf("error on sign: %s", err) - } - - msg, err := serializer(obj) - if err != nil { - return fmt.Errorf("error on serialize: %s", err) - } - - obj, err = ParseSigned(msg) - if err != nil { - return fmt.Errorf("error on parse: %s", err) - } - - // (Maybe) mangle the object - corrupter(obj) - - output, err := obj.Verify(verificationKey) - if err != nil { - return fmt.Errorf("error on verify: %s", err) - } - - // Check that verify works with embedded keys (if present) - for i, sig := range obj.Signatures { - if sig.Header.JsonWebKey != nil { - _, err = obj.Verify(sig.Header.JsonWebKey) - if err != nil { - return fmt.Errorf("error on verify with embedded key %d: %s", i, err) - } - } - - // Check that the nonce correctly round-tripped (if present) - if sig.Header.Nonce != nonce { - return fmt.Errorf("Incorrect nonce returned: [%s]", sig.Header.Nonce) - } - } - - if bytes.Compare(output, input) != 0 { - return fmt.Errorf("input/output do not match, got '%s', expected '%s'", output, input) - } - - return nil -} - -func TestRoundtripsJWS(t *testing.T) { - // Test matrix - sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512, HS256, HS384, HS512, ES256, ES384, ES512} - - serializers := []func(*JsonWebSignature) (string, error){ - func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebSignature) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupter := func(obj *JsonWebSignature) {} - - for _, alg := range sigAlgs { - signingKey, verificationKey := GenerateSigningTestKey(alg) - - for i, serializer := range serializers { - err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce") - if err != nil { - t.Error(err, alg, i) - } - } - } -} - -func TestRoundtripsJWSCorruptSignature(t *testing.T) { - // Test matrix - sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512, HS256, HS384, HS512, ES256, ES384, ES512} - - serializers := []func(*JsonWebSignature) (string, error){ - func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() }, - func(obj *JsonWebSignature) (string, error) { return obj.FullSerialize(), nil }, - } - - corrupters := []func(*JsonWebSignature){ - func(obj *JsonWebSignature) { - // Changes bytes in signature - obj.Signatures[0].Signature[10]++ - }, - func(obj 
*JsonWebSignature) { - // Set totally invalid signature - obj.Signatures[0].Signature = []byte("###") - }, - } - - // Test all different configurations - for _, alg := range sigAlgs { - signingKey, verificationKey := GenerateSigningTestKey(alg) - - for i, serializer := range serializers { - for j, corrupter := range corrupters { - err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce") - if err == nil { - t.Error("failed to detect corrupt signature", err, alg, i, j) - } - } - } - } -} - -func TestSignerWithBrokenRand(t *testing.T) { - sigAlgs := []SignatureAlgorithm{RS256, RS384, RS512, PS256, PS384, PS512} - - serializer := func(obj *JsonWebSignature) (string, error) { return obj.CompactSerialize() } - corrupter := func(obj *JsonWebSignature) {} - - // Break rand reader - readers := []func() io.Reader{ - // Totally broken - func() io.Reader { return bytes.NewReader([]byte{}) }, - // Not enough bytes - func() io.Reader { return io.LimitReader(rand.Reader, 20) }, - } - - defer resetRandReader() - - for _, alg := range sigAlgs { - signingKey, verificationKey := GenerateSigningTestKey(alg) - for i, getReader := range readers { - randReader = getReader() - err := RoundtripJWS(alg, serializer, corrupter, signingKey, verificationKey, "test_nonce") - if err == nil { - t.Error("signer should fail if rand is broken", alg, i) - } - } - } -} - -func TestJWSInvalidKey(t *testing.T) { - signingKey0, verificationKey0 := GenerateSigningTestKey(RS256) - _, verificationKey1 := GenerateSigningTestKey(ES256) - - signer, err := NewSigner(RS256, signingKey0) - if err != nil { - panic(err) - } - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := signer.Sign(input) - if err != nil { - panic(err) - } - - // Must work with correct key - _, err = obj.Verify(verificationKey0) - if err != nil { - t.Error("error on verify", err) - } - - // Must not work with incorrect key - _, err = obj.Verify(verificationKey1) - if err == nil { - t.Error("verification should fail with incorrect key") - } - - // Must not work with invalid key - _, err = obj.Verify("") - if err == nil { - t.Error("verification should fail with incorrect key") - } -} - -func TestMultiRecipientJWS(t *testing.T) { - signer := NewMultiSigner() - - sharedKey := []byte{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - - signer.AddRecipient(RS256, rsaTestKey) - signer.AddRecipient(HS384, sharedKey) - - input := []byte("Lorem ipsum dolor sit amet") - obj, err := signer.Sign(input) - if err != nil { - t.Error("error on sign: ", err) - return - } - - _, err = obj.CompactSerialize() - if err == nil { - t.Error("message with multiple recipient was compact serialized") - } - - msg := obj.FullSerialize() - - obj, err = ParseSigned(msg) - if err != nil { - t.Error("error on parse: ", err) - return - } - - output, err := obj.Verify(&rsaTestKey.PublicKey) - if err != nil { - t.Error("error on verify: ", err) - return - } - - if bytes.Compare(output, input) != 0 { - t.Error("input/output do not match", output, input) - return - } - - output, err = obj.Verify(sharedKey) - if err != nil { - t.Error("error on verify: ", err) - return - } - - if bytes.Compare(output, input) != 0 { - t.Error("input/output do not match", output, input) - return - } -} - -func GenerateSigningTestKey(sigAlg SignatureAlgorithm) (sig, ver interface{}) { - switch sigAlg { - case RS256, RS384, RS512, PS256, PS384, PS512: - sig = rsaTestKey - ver = &rsaTestKey.PublicKey - case HS256, 
HS384, HS512: - sig, _, _ = randomKeyGenerator{size: 16}.genKey() - ver = sig - case ES256: - key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - sig = key - ver = &key.PublicKey - case ES384: - key, _ := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - sig = key - ver = &key.PublicKey - case ES512: - key, _ := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - sig = key - ver = &key.PublicKey - default: - panic("Must update test case") - } - - return -} - -func TestInvalidSignerAlg(t *testing.T) { - _, err := NewSigner("XYZ", nil) - if err == nil { - t.Error("should not accept invalid algorithm") - } - - _, err = NewSigner("XYZ", []byte{}) - if err == nil { - t.Error("should not accept invalid algorithm") - } -} - -func TestInvalidJWS(t *testing.T) { - signer, err := NewSigner(PS256, rsaTestKey) - if err != nil { - panic(err) - } - - obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet")) - obj.Signatures[0].header = &rawHeader{ - Crit: []string{"TEST"}, - } - - _, err = obj.Verify(&rsaTestKey.PublicKey) - if err == nil { - t.Error("should not verify message with unknown crit header") - } - - // Try without alg header - obj.Signatures[0].protected = &rawHeader{} - obj.Signatures[0].header = &rawHeader{} - - _, err = obj.Verify(&rsaTestKey.PublicKey) - if err == nil { - t.Error("should not verify message with missing headers") - } -} - -func TestSignerKid(t *testing.T) { - kid := "DEADBEEF" - payload := []byte("Lorem ipsum dolor sit amet") - - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Error("problem generating test signing key", err) - } - - basejwk := JsonWebKey{Key: key} - jsonbar, err := basejwk.MarshalJSON() - if err != nil { - t.Error("problem marshalling base JWK", err) - } - - var jsonmsi map[string]interface{} - err = UnmarshalJSON(jsonbar, &jsonmsi) - if err != nil { - t.Error("problem unmarshalling base JWK", err) - } - jsonmsi["kid"] = kid - jsonbar2, err := MarshalJSON(jsonmsi) - if err != nil { - t.Error("problem marshalling kided JWK", err) - } - - var jwk JsonWebKey - err = jwk.UnmarshalJSON(jsonbar2) - if err != nil { - t.Error("problem unmarshalling kided JWK", err) - } - - signer, err := NewSigner(ES256, &jwk) - if err != nil { - t.Error("problem creating signer", err) - } - signed, err := signer.Sign(payload) - - serialized := signed.FullSerialize() - - parsed, err := ParseSigned(serialized) - if err != nil { - t.Error("problem parsing signed object", err) - } - - if parsed.Signatures[0].Header.KeyID != kid { - t.Error("KeyID did not survive trip") - } -} - -func TestEmbedJwk(t *testing.T) { - var payload = []byte("Lorem ipsum dolor sit amet") - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Error("Failed to generate key") - } - - signer, err := NewSigner(ES256, key) - if err != nil { - t.Error("Failed to create signer") - } - - object, err := signer.Sign(payload) - if err != nil { - t.Error("Failed to sign payload") - } - - object, err = ParseSigned(object.FullSerialize()) - if err != nil { - t.Error("Failed to parse jws") - } - - if object.Signatures[0].protected.Jwk == nil { - t.Error("JWK isn't set in protected header") - } - - // Now sign it again, but don't embed JWK. 
- signer.SetEmbedJwk(false) - - object, err = signer.Sign(payload) - if err != nil { - t.Error("Failed to sign payload") - } - - object, err = ParseSigned(object.FullSerialize()) - if err != nil { - t.Error("Failed to parse jws") - } - - if object.Signatures[0].protected.Jwk != nil { - t.Error("JWK is set in protected header") - } -} - -func TestSignerWithJWKAndKeyID(t *testing.T) { - enc, err := NewSigner(HS256, &JsonWebKey{ - KeyID: "test-id", - Key: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - }) - if err != nil { - t.Error(err) - } - - signed, _ := enc.Sign([]byte("Lorem ipsum dolor sit amet")) - - serialized1, _ := signed.CompactSerialize() - serialized2 := signed.FullSerialize() - - parsed1, _ := ParseSigned(serialized1) - parsed2, _ := ParseSigned(serialized2) - - if parsed1.Signatures[0].Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed1.Signatures[0].Header.KeyID) - } - if parsed2.Signatures[0].Header.KeyID != "test-id" { - t.Errorf("expected message to have key id from JWK, but found '%s' instead", parsed2.Signatures[0].Header.KeyID) - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go deleted file mode 100644 index 67f535e3..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/symmetric_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package jose - -import ( - "bytes" - "crypto/cipher" - "crypto/rand" - "io" - "testing" -) - -func TestInvalidSymmetricAlgorithms(t *testing.T) { - _, err := newSymmetricRecipient("XYZ", []byte{}) - if err != ErrUnsupportedAlgorithm { - t.Error("should not accept invalid algorithm") - } - - enc := &symmetricKeyCipher{} - _, err = enc.encryptKey([]byte{}, "XYZ") - if err != ErrUnsupportedAlgorithm { - t.Error("should not accept invalid algorithm") - } -} - -func TestAeadErrors(t *testing.T) { - aead := &aeadContentCipher{ - keyBytes: 16, - authtagBytes: 16, - getAead: func(key []byte) (cipher.AEAD, error) { - return nil, ErrCryptoFailure - }, - } - - parts, err := aead.encrypt([]byte{}, []byte{}, []byte{}) - if err != ErrCryptoFailure { - t.Error("should handle aead failure") - } - - _, err = aead.decrypt([]byte{}, []byte{}, parts) - if err != ErrCryptoFailure { - t.Error("should handle aead failure") - } -} - -func TestInvalidKey(t *testing.T) { - gcm := newAESGCM(16).(*aeadContentCipher) - _, err := gcm.getAead([]byte{}) - if err == nil { - t.Error("should not accept invalid key") - } -} - -func TestStaticKeyGen(t *testing.T) { - key := make([]byte, 32) - io.ReadFull(rand.Reader, key) - - gen := &staticKeyGenerator{key: key} - if gen.keySize() != len(key) { - t.Error("static key generator reports incorrect size") - } - - generated, _, err := gen.genKey() - if err != nil { - t.Error("static key generator should always succeed", err) - } - if !bytes.Equal(generated, key) { - t.Error("static key generator returns different data") - } -} - -func TestVectorsAESGCM(t *testing.T) { - // Source: http://tools.ietf.org/html/draft-ietf-jose-json-web-encryption-29#appendix-A.1 - plaintext := []byte{ - 84, 104, 101, 32, 116, 114, 117, 101, 32, 115, 105, 103, 110, 32, - 111, 102, 32, 105, 110, 116, 101, 108, 108, 105, 103, 101, 110, 99, - 101, 32, 105, 115, 32, 110, 111, 116, 32, 107, 110, 111, 119, 108, - 101, 100, 103, 101, 32, 98, 117, 116, 32, 105, 109, 97, 103, 105, - 110, 97, 116, 105, 111, 110, 46} - - aad := []byte{ - 101, 121, 74, 104, 98, 71, 99, 105, 79, 105, 74, 83, 85, 48, 69, - 116, 84, 48, 70, 70, 85, 67, 73, 115, 73, 109, 86, 117, 89, 121, 73, - 54, 73, 107, 69, 121, 78, 84, 90, 72, 81, 48, 48, 105, 102, 81} - - expectedCiphertext := []byte{ - 229, 236, 166, 241, 53, 191, 115, 196, 174, 43, 73, 109, 39, 122, - 233, 96, 140, 206, 120, 52, 51, 237, 48, 11, 190, 219, 186, 80, 111, - 104, 50, 142, 47, 167, 59, 61, 181, 127, 196, 21, 40, 82, 242, 32, - 123, 143, 168, 226, 73, 216, 176, 144, 138, 247, 106, 60, 16, 205, - 160, 109, 64, 63, 192} - - expectedAuthtag := []byte{ - 92, 80, 104, 49, 133, 25, 161, 215, 173, 101, 219, 211, 136, 91, 210, 145} - - // Mock random reader - randReader = bytes.NewReader([]byte{ - 177, 161, 244, 128, 84, 143, 225, 115, 63, 180, 3, 255, 107, 154, - 212, 246, 138, 7, 110, 91, 112, 46, 34, 105, 47, 130, 203, 46, 122, - 234, 64, 252, 227, 197, 117, 252, 2, 219, 233, 68, 180, 225, 77, 219}) - defer resetRandReader() - - enc := newAESGCM(32) - key, _, _ := randomKeyGenerator{size: 32}.genKey() - out, err := enc.encrypt(key, aad, plaintext) - if err != nil { - t.Error("Unable to encrypt:", err) - return - } - - if bytes.Compare(out.ciphertext, expectedCiphertext) != 0 { - t.Error("Ciphertext did not match") - } - if bytes.Compare(out.tag, expectedAuthtag) != 0 { - t.Error("Auth tag did not match") - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils_test.go 
b/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils_test.go deleted file mode 100644 index 6ad622da..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/utils_test.go +++ /dev/null @@ -1,225 +0,0 @@ -/*- - * Copyright 2014 Square Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package jose - -import ( - "crypto/ecdsa" - "crypto/rand" - "crypto/rsa" - "encoding/base64" - "encoding/hex" - "math/big" - "regexp" - "testing" -) - -// Reset random reader to original value -func resetRandReader() { - randReader = rand.Reader -} - -// Build big int from hex-encoded string. Strips whitespace (for testing). -func fromHexInt(base16 string) *big.Int { - re := regexp.MustCompile(`\s+`) - val, ok := new(big.Int).SetString(re.ReplaceAllString(base16, ""), 16) - if !ok { - panic("Invalid test data") - } - return val -} - -// Build big int from base64-encoded string. Strips whitespace (for testing). -func fromBase64Int(base64 string) *big.Int { - re := regexp.MustCompile(`\s+`) - val, err := base64URLDecode(re.ReplaceAllString(base64, "")) - if err != nil { - panic("Invalid test data") - } - return new(big.Int).SetBytes(val) -} - -// Decode hex-encoded string into byte array. Strips whitespace (for testing). -func fromHexBytes(base16 string) []byte { - re := regexp.MustCompile(`\s+`) - val, err := hex.DecodeString(re.ReplaceAllString(base16, "")) - if err != nil { - panic("Invalid test data") - } - return val -} - -// Decode base64-encoded string into byte array. Strips whitespace (for testing). -func fromBase64Bytes(b64 string) []byte { - re := regexp.MustCompile(`\s+`) - val, err := base64.StdEncoding.DecodeString(re.ReplaceAllString(b64, "")) - if err != nil { - panic("Invalid test data") - } - return val -} - -// Test vectors below taken from crypto/x509/x509_test.go in the Go std lib. 
- -var pkixPublicKey = `-----BEGIN PUBLIC KEY----- -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3VoPN9PKUjKFLMwOge6+ -wnDi8sbETGIx2FKXGgqtAKpzmem53kRGEQg8WeqRmp12wgp74TGpkEXsGae7RS1k -enJCnma4fii+noGH7R0qKgHvPrI2Bwa9hzsH8tHxpyM3qrXslOmD45EH9SxIDUBJ -FehNdaPbLP1gFyahKMsdfxFJLUvbUycuZSJ2ZnIgeVxwm4qbSvZInL9Iu4FzuPtg -fINKcbbovy1qq4KvPIrXzhbY3PWDc6btxCf3SE0JdE1MCPThntB62/bLMSQ7xdDR -FF53oIpvxe/SCOymfWq/LW849Ytv3Xwod0+wzAP8STXG4HSELS4UedPYeHJJJYcZ -+QIDAQAB ------END PUBLIC KEY-----` - -var pkcs1PrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0 -fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu -/ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu -RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/ -EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A -IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS -tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V ------END RSA PRIVATE KEY-----` - -var ecdsaSHA256p384CertPem = ` ------BEGIN CERTIFICATE----- -MIICSjCCAdECCQDje/no7mXkVzAKBggqhkjOPQQDAjCBjjELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDAS -BgNVBAoMC0dvb2dsZSwgSW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEG -CSqGSIb3DQEJARYUZ29sYW5nLWRldkBnbWFpbC5jb20wHhcNMTIwNTIxMDYxMDM0 -WhcNMjIwNTE5MDYxMDM0WjCBjjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlm -b3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDASBgNVBAoMC0dvb2dsZSwg -SW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEGCSqGSIb3DQEJARYUZ29s -YW5nLWRldkBnbWFpbC5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARRuzRNIKRK -jIktEmXanNmrTR/q/FaHXLhWRZ6nHWe26Fw7Rsrbk+VjGy4vfWtNn7xSFKrOu5ze -qxKnmE0h5E480MNgrUiRkaGO2GMJJVmxx20aqkXOk59U8yGA4CghE6MwCgYIKoZI -zj0EAwIDZwAwZAIwBZEN8gvmRmfeP/9C1PRLzODIY4JqWub2PLRT4mv9GU+yw3Gr -PU9A3CHMdEcdw/MEAjBBO1lId8KOCh9UZunsSMfqXiVurpzmhWd6VYZ/32G+M+Mh -3yILeYQzllt/g0rKVRk= ------END CERTIFICATE-----` - -var ecdsaSHA256p384CertDer = fromBase64Bytes(` -MIICSjCCAdECCQDje/no7mXkVzAKBggqhkjOPQQDAjCBjjELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDAS -BgNVBAoMC0dvb2dsZSwgSW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEG -CSqGSIb3DQEJARYUZ29sYW5nLWRldkBnbWFpbC5jb20wHhcNMTIwNTIxMDYxMDM0 -WhcNMjIwNTE5MDYxMDM0WjCBjjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlm -b3JuaWExFjAUBgNVBAcMDU1vdW50YWluIFZpZXcxFDASBgNVBAoMC0dvb2dsZSwg -SW5jMRcwFQYDVQQDDA53d3cuZ29vZ2xlLmNvbTEjMCEGCSqGSIb3DQEJARYUZ29s -YW5nLWRldkBnbWFpbC5jb20wdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARRuzRNIKRK -jIktEmXanNmrTR/q/FaHXLhWRZ6nHWe26Fw7Rsrbk+VjGy4vfWtNn7xSFKrOu5ze -qxKnmE0h5E480MNgrUiRkaGO2GMJJVmxx20aqkXOk59U8yGA4CghE6MwCgYIKoZI -zj0EAwIDZwAwZAIwBZEN8gvmRmfeP/9C1PRLzODIY4JqWub2PLRT4mv9GU+yw3Gr -PU9A3CHMdEcdw/MEAjBBO1lId8KOCh9UZunsSMfqXiVurpzmhWd6VYZ/32G+M+Mh -3yILeYQzllt/g0rKVRk=`) - -var pkcs8ECPrivateKey = ` ------BEGIN PRIVATE KEY----- -MIHtAgEAMBAGByqGSM49AgEGBSuBBAAjBIHVMIHSAgEBBEHqkl65VsjYDQWIHfgv -zQLPa0JZBsaJI16mjiH8k6VA4lgfK/KNldlEsY433X7wIzo43u8OpX7Nv7n8pVRH -15XWK6GBiQOBhgAEAfDuikMI4bWsyse7t8iSCmjt9fneW/qStZuIPuVLo7mSJdud -Cs3J/x9wOnnhLv1u+0atnq5HKKdL4ff3itJPlhmSAQzByKQ5LTvB7d6fn95GJVK/ -hNuS5qGBpB7qeMXVFoki0/2RZIOway8/fXjmNYwe4v/XB5LLn4hcTvEUGYcF8M9K ------END PRIVATE KEY-----` - -var ecPrivateKey = ` ------BEGIN EC PRIVATE KEY----- -MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu -N8yrywiQaTDEqn1zVcLwrnqoQux3gWN1jxugBwYFK4EEACOhgYkDgYYABAFJgaM/ -2a3+gE6Khm/1PYftqNwAzQ21HSLp27q2lTN+GBFho691ARFRkr9UzlQ8gRnhkTbu -yGfASamlHsYlr3Tv+gFc4BY8SU0q8kzpQ0dOHWFk7dfGFmKwhJrSFIIOeRn/LY03 
-XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA== ------END EC PRIVATE KEY-----` - -var ecPrivateKeyDer = fromBase64Bytes(` -MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu -N8yrywiQaTDEqn1zVcLwrnqoQux3gWN1jxugBwYFK4EEACOhgYkDgYYABAFJgaM/ -2a3+gE6Khm/1PYftqNwAzQ21HSLp27q2lTN+GBFho691ARFRkr9UzlQ8gRnhkTbu -yGfASamlHsYlr3Tv+gFc4BY8SU0q8kzpQ0dOHWFk7dfGFmKwhJrSFIIOeRn/LY03 -XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA==`) - -var invalidPemKey = ` ------BEGIN PUBLIC KEY----- -MIHcAgEBBEIBv2rdY9mWGD/UgiuXB0LJcUzgaB6TXq/Ra1jrZKBV3IGSacM5QDFu -XsVFctNDsGhobS2JguQrxhGx8Ll7vQCakV/PEmCQJA== ------END PUBLIC KEY-----` - -func TestLoadPublicKey(t *testing.T) { - pub, err := LoadPublicKey([]byte(pkixPublicKey)) - switch pub.(type) { - case *rsa.PublicKey: - default: - t.Error("failed to parse RSA PKIX public key:", err) - } - - pub, err = LoadPublicKey([]byte(ecdsaSHA256p384CertPem)) - switch pub.(type) { - case *ecdsa.PublicKey: - default: - t.Error("failed to parse ECDSA X.509 cert:", err) - } - - pub, err = LoadPublicKey([]byte(ecdsaSHA256p384CertDer)) - switch pub.(type) { - case *ecdsa.PublicKey: - default: - t.Error("failed to parse ECDSA X.509 cert:", err) - } - - pub, err = LoadPublicKey([]byte("###")) - if err == nil { - t.Error("should not parse invalid key") - } - - pub, err = LoadPublicKey([]byte(invalidPemKey)) - if err == nil { - t.Error("should not parse invalid key") - } -} - -func TestLoadPrivateKey(t *testing.T) { - priv, err := LoadPrivateKey([]byte(pkcs1PrivateKey)) - switch priv.(type) { - case *rsa.PrivateKey: - default: - t.Error("failed to parse RSA PKCS1 private key:", err) - } - - priv, err = LoadPrivateKey([]byte(pkcs8ECPrivateKey)) - if _, ok := priv.(*ecdsa.PrivateKey); !ok { - t.Error("failed to parse EC PKCS8 private key:", err) - } - - priv, err = LoadPrivateKey([]byte(ecPrivateKey)) - if _, ok := priv.(*ecdsa.PrivateKey); !ok { - t.Error("failed to parse EC private key:", err) - } - - priv, err = LoadPrivateKey([]byte(ecPrivateKeyDer)) - if _, ok := priv.(*ecdsa.PrivateKey); !ok { - t.Error("failed to parse EC private key:", err) - } - - priv, err = LoadPrivateKey([]byte("###")) - if err == nil { - t.Error("should not parse invalid key") - } - - priv, err = LoadPrivateKey([]byte(invalidPemKey)) - if err == nil { - t.Error("should not parse invalid key") - } -} diff --git a/vendor/rsc.io/letsencrypt/vendor/vendor.json b/vendor/rsc.io/letsencrypt/vendor/vendor.json deleted file mode 100644 index 8a424110..00000000 --- a/vendor/rsc.io/letsencrypt/vendor/vendor.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "comment": "", - "ignore": "", - "package": [ - { - "checksumSHA1": "CHmdoMriAboKW2nHYSXo0yBizaE=", - "path": "github.com/xenolf/lego/acme", - "revision": "ca19a90028e242e878585941c2a27c8f3b3efc25", - "revisionTime": "2016-03-28T16:28:34Z" - }, - { - "checksumSHA1": "jrheBzltbBE1frmNXQiu911T7dE=", - "path": "gopkg.in/square/go-jose.v1", - "revision": "40d457b439244b546f023d056628e5184136899b", - "revisionTime": "2016-03-29T20:33:11Z" - }, - { - "checksumSHA1": "fX4KSC9E1oX9yRx20Zjb3rVJHn4=", - "path": "gopkg.in/square/go-jose.v1/cipher", - "revision": "40d457b439244b546f023d056628e5184136899b", - "revisionTime": "2016-03-29T20:33:11Z" - }, - { - "checksumSHA1": "NxdXsIcLGuuX654ygsaOhoLsg6s=", - "path": "gopkg.in/square/go-jose.v1/json", - "revision": "40d457b439244b546f023d056628e5184136899b", - "revisionTime": "2016-03-29T20:33:11Z" - } - ], - "rootPath": "rsc.io/letsencrypt" -}